repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
huhuikevin/kernel_imx | kernel/srcu.c | 267 | 10675 | /*
* Sleepable Read-Copy Update mechanism for mutual exclusion.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2006
*
* Author: Paul McKenney <paulmck@us.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU/ *.txt
*
*/
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
sp->completed = 0;
mutex_init(&sp->mutex);
sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
return sp->per_cpu_ref ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * __init_srcu_struct - lockdep-aware initialization of an srcu_struct.
 * @sp: structure to initialize.
 * @name: lock-class name for lockdep.
 * @key: lock-class key for lockdep.
 *
 * Used instead of init_srcu_struct() when CONFIG_DEBUG_LOCK_ALLOC is
 * set, so that each srcu_struct gets its own lockdep class.
 * Returns 0 on success or -ENOMEM.
 */
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function. Each srcu_struct represents a separate domain
 * of SRCU protection.
 *
 * Returns 0 on success or -ENOMEM if the per-CPU counter array cannot
 * be allocated.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/*
* srcu_readers_active_idx -- returns approximate number of readers
* active on the specified rank of per-CPU counters.
*/
static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
int cpu;
int sum;
sum = 0;
for_each_possible_cpu(cpu)
sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx];
return sum;
}
/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct. That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int nr = 0;
	int rank;

	/* Sum both counter ranks. */
	for (rank = 0; rank < 2; rank++)
		nr += srcu_readers_active_idx(sp, rank);
	return nr;
}
/**
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
* @sp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
void cleanup_srcu_struct(struct srcu_struct *sp)
{
int sum;
sum = srcu_readers_active(sp);
WARN_ON(sum); /* Leakage unless caller handles error. */
if (sum != 0)
return;
free_percpu(sp->per_cpu_ref);
sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct. Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	/* Disable preemption so the increment lands on this CPU's element. */
	preempt_disable();
	idx = sp->completed & 0x1;	/* snapshot the active counter rank */
	barrier(); /* ensure compiler looks -once- at sp->completed. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]++;
	srcu_barrier(); /* ensure compiler won't misorder critical section. */
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct. Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 *
 * @idx must be the value returned by the matching __srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	preempt_disable();
	srcu_barrier(); /* ensure compiler won't misorder critical section. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited(). We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections. If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods. This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SYNCHRONIZE_SRCU_READER_DELAY 10
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 *
 * @sp: the srcu_struct whose prior readers we wait for.
 * @sync_func: synchronize_sched() or synchronize_sched_expedited();
 *	invoked to force a full memory barrier on all CPUs.
 *
 * Note that sp->completed is deliberately sampled -before- acquiring
 * sp->mutex, so the early-exit test below can tell whether other tasks
 * already completed a full grace period on our behalf while we waited
 * for the mutex.
 */
static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
{
	int idx;

	idx = sp->completed;	/* pre-lock snapshot; see comment above */
	mutex_lock(&sp->mutex);
	/*
	 * Check to see if someone else did the work for us while we were
	 * waiting to acquire the lock. We need -two- advances of
	 * the counter, not just one. If there was but one, we might have
	 * shown up -after- our helper's first synchronize_sched(), thus
	 * having failed to prevent CPU-reordering races with concurrent
	 * srcu_read_unlock()s on other CPUs (see comment below). So we
	 * either (1) wait for two or (2) supply the second ourselves.
	 */
	if ((sp->completed - idx) >= 2) {
		mutex_unlock(&sp->mutex);
		return;
	}
	sync_func(); /* Force memory barrier on all CPUs. */
	/*
	 * The preceding synchronize_sched() ensures that any CPU that
	 * sees the new value of sp->completed will also see any preceding
	 * changes to data structures made by this CPU. This prevents
	 * some other CPU from reordering the accesses in its SRCU
	 * read-side critical section to precede the corresponding
	 * srcu_read_lock() -- ensuring that such references will in
	 * fact be protected.
	 *
	 * So it is now safe to do the flip.
	 */
	idx = sp->completed & 0x1;	/* rank new readers are now using */
	sp->completed++;	/* flip: subsequent readers use the other rank */
	sync_func(); /* Force memory barrier on all CPUs. */
	/*
	 * At this point, because of the preceding synchronize_sched(),
	 * all srcu_read_lock() calls using the old counters have completed.
	 * Their corresponding critical sections might well be still
	 * executing, but the srcu_read_lock() primitives themselves
	 * will have finished executing. We initially give readers
	 * an arbitrarily chosen 10 microseconds to get out of their
	 * SRCU read-side critical sections, then loop waiting 1/HZ
	 * seconds per iteration. The 10-microsecond value has done
	 * very well in testing.
	 */
	if (srcu_readers_active_idx(sp, idx))
		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
	while (srcu_readers_active_idx(sp, idx))
		schedule_timeout_interruptible(1);
	sync_func(); /* Force memory barrier on all CPUs. */
	/*
	 * The preceding synchronize_sched() forces all srcu_read_unlock()
	 * primitives that were executing concurrently with the preceding
	 * for_each_possible_cpu() loop to have completed by this point.
	 * More importantly, it also forces the corresponding SRCU read-side
	 * critical sections to have also completed, and the corresponding
	 * references to SRCU-protected data items to be dropped.
	 *
	 * Note:
	 *
	 * Despite what you might think at first glance, the
	 * preceding synchronize_sched() -must- be within the
	 * critical section ended by the following mutex_unlock().
	 * Otherwise, a task taking the early exit can race
	 * with a srcu_read_unlock(), which might have executed
	 * just before the preceding srcu_readers_active() check,
	 * and whose CPU might have reordered the srcu_read_unlock()
	 * with the preceding critical section. In this case, there
	 * is nothing preventing the synchronize_sched() task that is
	 * taking the early exit from freeing a data structure that
	 * is still being referenced (out of order) by the task
	 * doing the srcu_read_unlock().
	 *
	 * Alternatively, the comparison with "2" on the early exit
	 * could be changed to "3", but this increases synchronize_srcu()
	 * latency for bulk loads. So the current code is preferred.
	 */
	mutex_unlock(&sp->mutex);
}
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates. Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	/* Use the normal (non-expedited) grace-period primitive. */
	__synchronize_srcu(sp, synchronize_sched);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
/**
 * synchronize_srcu_expedited - like synchronize_srcu, but less patient
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates. Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu_expedited()
 * from the corresponding SRCU read-side critical section; doing so
 * will result in deadlock. However, it is perfectly legal to call
 * synchronize_srcu_expedited() on one srcu_struct from some other
 * srcu_struct's read-side critical section.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	/* Same algorithm, but with the expedited grace-period primitive. */
	__synchronize_srcu(sp, synchronize_sched_expedited);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 *
 * Reads ->completed without locking, so the caller gets a snapshot that
 * may already be stale when used.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
| gpl-2.0 |
anasanzari/Cowcopy | drivers/clk/tegra/clk-periph.c | 523 | 6024 | /*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include "clk.h"
/* Report the currently selected parent by delegating to the embedded mux. */
static u8 clk_periph_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);

	/* Propagate our clk to the mux's clk_hw before delegating. */
	__clk_hw_set_clk(&periph->mux.hw, hw);
	return periph->mux_ops->get_parent(&periph->mux.hw);
}
/* Select parent @index by delegating to the embedded mux. */
static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	struct clk_hw *mux = &periph->mux.hw;

	__clk_hw_set_clk(mux, hw);
	return periph->mux_ops->set_parent(mux, index);
}
/* Compute the output rate from @parent_rate via the embedded divider. */
static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	struct clk_hw *divider = &periph->divider.hw;

	__clk_hw_set_clk(divider, hw);
	return periph->div_ops->recalc_rate(divider, parent_rate);
}
/* Round @rate to a value the embedded divider can actually produce. */
static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	struct clk_hw *divider = &periph->divider.hw;

	__clk_hw_set_clk(divider, hw);
	return periph->div_ops->round_rate(divider, rate, prate);
}
/* Program the embedded divider so the output runs at @rate. */
static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	struct clk_hw *divider = &periph->divider.hw;

	__clk_hw_set_clk(divider, hw);
	return periph->div_ops->set_rate(divider, rate, parent_rate);
}
/* Query the gate state by delegating to the embedded gate. */
static int clk_periph_is_enabled(struct clk_hw *hw)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	struct clk_hw *gate = &periph->gate.hw;

	__clk_hw_set_clk(gate, hw);
	return periph->gate_ops->is_enabled(gate);
}
/* Ungate the clock by delegating to the embedded gate. */
static int clk_periph_enable(struct clk_hw *hw)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	struct clk_hw *gate = &periph->gate.hw;

	__clk_hw_set_clk(gate, hw);
	return periph->gate_ops->enable(gate);
}
/* Gate the clock off by delegating to the embedded gate.
 *
 * Fix: unlike every sibling callback (enable, is_enabled, and the
 * mux/divider ops), this one did not propagate hw->clk into the gate's
 * clk_hw via __clk_hw_set_clk() before delegating, so a disable issued
 * before any other gate operation could hand gate_ops a clk_hw whose
 * clk pointer was never set.  Do the same propagation here for
 * consistency with the other callbacks.
 */
static void clk_periph_disable(struct clk_hw *hw)
{
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	const struct clk_ops *gate_ops = periph->gate_ops;
	struct clk_hw *gate_hw = &periph->gate.hw;

	__clk_hw_set_clk(gate_hw, hw);
	gate_ops->disable(gate_hw);
}
/* clk_ops for a full peripheral clock: parent mux + divider + gate. */
const struct clk_ops tegra_clk_periph_ops = {
	.get_parent = clk_periph_get_parent,
	.set_parent = clk_periph_set_parent,
	.recalc_rate = clk_periph_recalc_rate,
	.round_rate = clk_periph_round_rate,
	.set_rate = clk_periph_set_rate,
	.is_enabled = clk_periph_is_enabled,
	.enable = clk_periph_enable,
	.disable = clk_periph_disable,
};
/* clk_ops for peripheral clocks with no divider (rate follows the parent). */
static const struct clk_ops tegra_clk_periph_nodiv_ops = {
	.get_parent = clk_periph_get_parent,
	.set_parent = clk_periph_set_parent,
	.is_enabled = clk_periph_is_enabled,
	.enable = clk_periph_enable,
	.disable = clk_periph_disable,
};
/* clk_ops for peripheral clocks that cannot be gated. */
static const struct clk_ops tegra_clk_periph_no_gate_ops = {
	.get_parent = clk_periph_get_parent,
	.set_parent = clk_periph_set_parent,
	.recalc_rate = clk_periph_recalc_rate,
	.round_rate = clk_periph_round_rate,
	.set_rate = clk_periph_set_rate,
};
/*
 * Common worker for registering a Tegra peripheral clock composed of a
 * parent mux, an optional divider, and a gate that share one register
 * at @clk_base + @offset.  Returns the registered clk or an ERR_PTR.
 */
static struct clk *_tegra_clk_register_periph(const char *name,
		const char **parent_names, int num_parents,
		struct tegra_clk_periph *periph,
		void __iomem *clk_base, u32 offset,
		unsigned long flags)
{
	bool has_div = !(periph->gate.flags & TEGRA_PERIPH_NO_DIV);
	struct tegra_clk_periph_regs *bank;
	struct clk_init_data init;
	struct clk *clk;

	if (!has_div) {
		/* No divider: rate is whatever the selected parent runs at. */
		flags |= CLK_SET_RATE_PARENT;
		init.ops = &tegra_clk_periph_nodiv_ops;
	} else if (periph->gate.flags & TEGRA_PERIPH_NO_GATE) {
		init.ops = &tegra_clk_periph_no_gate_ops;
	} else {
		init.ops = &tegra_clk_periph_ops;
	}
	init.name = name;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	bank = get_reg_bank(periph->gate.clk_num);
	if (!bank)
		return ERR_PTR(-EINVAL);
	/* Data in .init is copied by clk_register(), so stack variable OK */
	periph->hw.init = &init;
	periph->magic = TEGRA_CLK_PERIPH_MAGIC;
	periph->mux.reg = clk_base + offset;
	periph->divider.reg = has_div ? (clk_base + offset) : NULL;
	periph->gate.clk_base = clk_base;
	periph->gate.regs = bank;
	periph->gate.enable_refcnt = periph_clk_enb_refcnt;
	clk = clk_register(NULL, &periph->hw);
	if (IS_ERR(clk))
		return clk;
	/* Point the sub-clock hw structs back at the registered clk. */
	periph->mux.hw.clk = clk;
	periph->divider.hw.clk = has_div ? clk : NULL;
	periph->gate.hw.clk = clk;
	return clk;
}
/*
 * Register a standard mux/divider/gate peripheral clock.
 * Thin public wrapper around _tegra_clk_register_periph().
 */
struct clk *tegra_clk_register_periph(const char *name,
		const char **parent_names, int num_parents,
		struct tegra_clk_periph *periph, void __iomem *clk_base,
		u32 offset, unsigned long flags)
{
	return _tegra_clk_register_periph(name, parent_names, num_parents,
			periph, clk_base, offset, flags);
}
/*
 * Register a peripheral clock that has no divider: forces the
 * TEGRA_PERIPH_NO_DIV flag and CLK_SET_RATE_PARENT, so rate requests
 * propagate to the parent.
 */
struct clk *tegra_clk_register_periph_nodiv(const char *name,
		const char **parent_names, int num_parents,
		struct tegra_clk_periph *periph, void __iomem *clk_base,
		u32 offset)
{
	periph->gate.flags |= TEGRA_PERIPH_NO_DIV;
	return _tegra_clk_register_periph(name, parent_names, num_parents,
			periph, clk_base, offset, CLK_SET_RATE_PARENT);
}
| gpl-2.0 |
tinyclub/preempt-rt-linux | arch/sparc/kernel/pmc.c | 779 | 2047 | /* pmc - Driver implementation for power management functions
* of Power Management Controller (PMC) on SPARCstation-Voyager.
*
* Copyright (c) 2002 Eric Brower (ebrower@usa.net)
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/auxio.h>
/* Debug
*
* #define PMC_DEBUG_LED
* #define PMC_NO_IDLE
*/
#define PMC_OBPNAME "SUNW,pmc"
#define PMC_DEVNAME "pmc"
/* Offset of the idle-control register and its "enter idle" bit. */
#define PMC_IDLE_REG 0x00
#define PMC_IDLE_ON 0x01
/* Mapped PMC register bank; set up in pmc_probe(). */
static u8 __iomem *regs;
/* Byte accessors for the mapped PMC registers. */
#define pmc_readb(offs) (sbus_readb(regs+offs))
#define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs))
/*
 * CPU idle callback function
 * See .../arch/sparc/kernel/process.c
 *
 * Sets the idle bit in the PMC idle register via read-modify-write,
 * asking the Power Management Controller to idle the CPU.
 */
static void pmc_swift_idle(void)
{
#ifdef PMC_DEBUG_LED
	set_auxio(0x00, AUXIO_LED);	/* debug: LED off while idle */
#endif
	pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG);
#ifdef PMC_DEBUG_LED
	set_auxio(AUXIO_LED, 0x00);	/* debug: LED back on after wakeup */
#endif
}
/*
 * Map the PMC register bank and install pmc_swift_idle() as the
 * power-management idle handler.  Returns 0 on success or -ENODEV
 * if the registers cannot be mapped.
 */
static int __devinit pmc_probe(struct of_device *op,
			       const struct of_device_id *match)
{
	regs = of_ioremap(&op->resource[0], 0,
			  resource_size(&op->resource[0]), PMC_OBPNAME);
	if (!regs) {
		printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME);
		return -ENODEV;
	}
#ifndef PMC_NO_IDLE
	/* Assign power management IDLE handler */
	pm_idle = pmc_swift_idle;
#endif
	printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
	return 0;
}
static struct of_device_id __initdata pmc_match[] = {
{
.name = PMC_OBPNAME,
},
{},
};
MODULE_DEVICE_TABLE(of, pmc_match);
/* OF platform driver glue; matched against PMC_OBPNAME device nodes. */
static struct of_platform_driver pmc_driver = {
	.name = "pmc",
	.match_table = pmc_match,
	.probe = pmc_probe,
};
/* Register the PMC driver on the OF bus. */
static int __init pmc_init(void)
{
	return of_register_driver(&pmc_driver, &of_bus_type);
}
/* This driver is not critical to the boot process
 * and is easiest to ioremap when SBus is already
 * initialized, so we install ourselves thusly:
 */
__initcall(pmc_init);
| gpl-2.0 |
javelinanddart/bricked-flo | net/ipv4/fib_semantics.c | 779 | 29702 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IPv4 Forwarding Information Base: semantics.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include "fib_lookup.h"
/* Serializes updates to the fib_info hash tables below. */
static DEFINE_SPINLOCK(fib_info_lock);
/* All fib_infos, hashed by fib_info_hashfn(). */
static struct hlist_head *fib_info_hash;
/* fib_infos with a preferred source address, hashed by fib_laddr_hashfn(). */
static struct hlist_head *fib_info_laddrhash;
/* Current bucket count of the two hashes above. */
static unsigned int fib_info_hash_size;
/* Number of live fib_info structures. */
static unsigned int fib_info_cnt;
#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
/* Nexthops hashed by output-device ifindex; see fib_devindex_hashfn(). */
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static DEFINE_SPINLOCK(fib_multipath_lock);
/*
 * Iterate over all nexthops of a fib_info.  for_nexthops() walks them
 * read-only as "nh"/"nhsel"; change_nexthops() walks them writably as
 * "nexthop_nh".  Both open a block that must be closed with
 * endfor_nexthops().
 */
#define for_nexthops(fi) { \
	int nhsel; const struct fib_nh *nh; \
	for (nhsel = 0, nh = (fi)->fib_nh; \
	     nhsel < (fi)->fib_nhs; \
	     nh++, nhsel++)
#define change_nexthops(fi) { \
	int nhsel; struct fib_nh *nexthop_nh; \
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
	     nhsel < (fi)->fib_nhs; \
	     nexthop_nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */
/* Hope, that gcc will optimize it to get rid of dummy loop */
#define for_nexthops(fi) { \
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
	for (nhsel = 0; nhsel < 1; nhsel++)
#define change_nexthops(fi) { \
	int nhsel; \
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
	for (nhsel = 0; nhsel < 1; nhsel++)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
#define endfor_nexthops(fi) }
/*
 * Per-route-type properties, indexed by RTN_* type: the error code
 * associated with the type (0 means the packet is delivered/forwarded)
 * and the widest nexthop scope the type permits.
 */
const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error = 0,
		.scope = RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error = 0,
		.scope = RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error = 0,
		.scope = RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error = 0,
		.scope = RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error = 0,
		.scope = RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error = 0,
		.scope = RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error = -EINVAL,
		.scope = RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error = -EHOSTUNREACH,
		.scope = RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error = -EACCES,
		.scope = RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error = -EAGAIN,
		.scope = RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error = -EINVAL,
		.scope = RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error = -EINVAL,
		.scope = RT_SCOPE_NOWHERE,
	},
};
/* Release a nexthop info record */
/*
 * RCU callback: actually free a fib_info once all lockless readers are
 * done with it.  The metrics array is only freed when it is private --
 * routes without their own metrics share dst_default_metrics.
 */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}
/*
 * Release a fib_info.  The caller must already have marked it dead
 * (fi->fib_dead != 0); the actual kfree() is deferred to an RCU grace
 * period via free_fib_info_rcu() so lockless readers never see freed
 * memory.
 */
void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	/* Drop the device reference held by each nexthop. */
	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		nexthop_nh->nh_dev = NULL;
	} endfor_nexthops(fi);
	fib_info_cnt--;
	release_net(fi->fib_net);
	call_rcu(&fi->rcu, free_fib_info_rcu);
}
/*
 * Drop one tree reference on @fi.  When the last tree reference goes
 * away, unlink the fib_info and its nexthops from all hash tables
 * (under fib_info_lock), mark it dead, and release a refcount via
 * fib_info_put().
 */
void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}
/*
 * Compare the nexthop lists of two fib_infos that are already known to
 * have the same number of nexthops.  Returns 0 when every nexthop
 * matches (the RTNH_F_DEAD flag is ignored), -1 otherwise.
 */
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	/* onh walks ofi's nexthop array in step with fi's. */
	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}
/* Hash a device ifindex into a fib_info_devhash bucket. */
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int h = val;

	/* Fold the higher bit groups down before masking. */
	h ^= val >> DEVINDEX_HASHBITS;
	h ^= val >> (DEVINDEX_HASHBITS * 2);
	return h & (DEVINDEX_HASHSIZE - 1);
}
/*
 * Hash a fib_info over its identity fields (nexthop count, protocol,
 * scope, preferred source, priority, and each nexthop's oif) into a
 * fib_info_hash bucket.  Must agree with the equality test used by
 * fib_find_info().
 */
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)
	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
/*
 * Look up an existing fib_info equal to @nfi (same netns, nexthop
 * count, protocol, scope, prefsrc, priority, metrics, flags modulo
 * RTNH_F_DEAD, and nexthops) so routes can share one record.
 * Returns the match or NULL.
 */
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];
	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}
	return NULL;
}
/* Check, that the gateway is already configured.
 * Used only by redirect accept routine.
 *
 * Returns 0 if some live (not RTNH_F_DEAD) nexthop already uses
 * gateway @gw through device @dev, -1 otherwise.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}
	spin_unlock(&fib_info_lock);
	return -1;
}
/*
 * Conservative upper bound on the netlink message size needed to dump
 * the route described by @fi: rtmsg header, the standard attributes,
 * nested metrics, and one nexthop attribute per nexthop.
 */
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}
/*
 * Notify RTNLGRP_IPV4_ROUTE listeners of a change (RTM_* @event) to the
 * route @key/@dst_len described by @fa in table @tb_id.  On allocation
 * or fill failure, the error is recorded with rtnl_set_sk_err() so
 * listeners learn they missed an update.
 */
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	struct fib_alias *fa;

	if (fah == NULL)
		return NULL;

	list_for_each_entry(fa, fah, fa_list) {
		if (fa->fa_tos > tos)
			continue;
		/* First entry below @tos, or at @tos with priority >= @prio. */
		if (fa->fa_tos < tos || fa->fa_info->fib_priority >= prio)
			return fa;
	}
	return NULL;
}
/*
 * Judge whether default route @fi (at position @order among the
 * candidate default routes, @dflt being the current default's index)
 * looks dead, based on the ARP neighbour state of its first nexthop
 * gateway.  Returns 0 if the route looks usable, 1 if it should be
 * skipped; *last_resort/*last_idx record the best fallback seen so far.
 */
int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Count the nexthops in an RTA_MULTIPATH attribute of @remaining bytes. */
static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int count;

	for (count = 0; rtnh_ok(rtnh, remaining); count++)
		rtnh = rtnh_next(rtnh, &remaining);

	/* leftover bytes mean a malformed nexthop list -- reject it */
	if (remaining > 0)
		return 0;
	return count;
}
/*
 * Parse the RTA_MULTIPATH nexthop list @rtnh (of @remaining bytes)
 * into @fi's nexthop array.  The upper flag bits of cfg->fc_flags are
 * merged with each nexthop's own flag byte.  Returns 0 on success or
 * -EINVAL on a malformed list.
 */
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
		}
		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
	return 0;
}
#endif
/*
 * Check whether the route request @cfg matches @fi closely enough that
 * (e.g.) a delete request for @cfg should select @fi.
 *
 * Returns 0 on a match, 1 on a mismatch, or -EINVAL when @cfg carries
 * a malformed multipath nexthop list.
 */
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	/* Single-nexthop request: compare against the first nexthop only. */
	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		/*
		 * Fix: this test used to be "attrlen < 0", which can never
		 * be true for a nexthop that passed rtnh_ok(), so the
		 * RTA_GATEWAY/RTA_FLOW comparisons below were dead code and
		 * a delete request could match the wrong multipath route.
		 */
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}
		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}
/*
* Picture
* -------
*
* Semantics of nexthop is very messy by historical reasons.
* We have to take into account, that:
* a) gateway can be actually local interface address,
* so that gatewayed route is direct.
* b) gateway must be on-link address, possibly
* described not by an ifaddr, but also by a direct route.
* c) If both gateway and interface are specified, they should not
* contradict.
* d) If we use tunnel routes, gateway could be not on-link.
*
* Attempt to reconcile all of these (alas, self-contradictory) conditions
* results in pretty ugly and hairy code with obscure logic.
*
* I chose to generalize it instead, so that the size
* of code does not increase practically, but it becomes
* much more general.
* Every prefix is assigned a "scope" value: "host" is local address,
* "link" is direct route,
* [ ... "site" ... "interior" ... ]
* and "universe" is true gateway route with global meaning.
*
* Every prefix refers to a set of "nexthop"s (gw, oif),
* where gw must have narrower scope. This recursion stops
* when gw has LOCAL scope or if "nexthop" is declared ONLINK,
* which means that gw is forced to be on link.
*
* Code is still hairy, but now it is apparently logically
* consistent and very flexible. F.e. as by-product it allows
* to co-exists in peace independent exterior and interior
* routing processes.
*
* Normally it looks as following.
*
* {universe prefix} -> (gw, oif) [scope link]
* |
* |-> {link prefix} -> (gw, oif) [scope local]
* |
* |-> {local prefix} (terminal node)
*/
/*
 * Resolve and validate nexthop @nh of @fi as configured by @cfg:
 * determine the output device, take a reference on it, and set
 * nh->nh_scope.  Three cases (see the big comment above on nexthop
 * semantics):
 *  - gateway marked ONLINK: gateway must be a unicast address and the
 *    configured oif must exist and be up; scope becomes LINK;
 *  - gateway without ONLINK: recursively look up a route to the
 *    gateway with a strictly narrower scope;
 *  - no gateway (direct route): use the configured interface itself,
 *    scope becomes HOST.
 * Returns 0 on success or a negative errno.
 */
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {
			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			/* Look up a route to the gateway itself. */
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl4, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}
/* Hash a local (preferred-source) address into fib_info_laddrhash. */
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	u32 v = (__force u32)val;

	return (v ^ (v >> 7) ^ (v >> 14)) & (fib_info_hash_size - 1);
}
/*
 * Allocate a zeroed hash table of @bytes bytes.  Tables that fit in a
 * page come from the slab; larger ones come from the page allocator.
 */
static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes > PAGE_SIZE)
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));

	return kzalloc(bytes, GFP_KERNEL);
}
static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
if (!hash)
return;
if (bytes <= PAGE_SIZE)
kfree(hash);
else
free_pages((unsigned long) hash, get_order(bytes));
}
/*
 * fib_info_hash_move - rehash all fib_info entries into larger tables.
 * @new_info_hash: replacement for fib_info_hash (already allocated, zeroed)
 * @new_laddrhash: replacement for fib_info_laddrhash
 * @new_size:      number of buckets in each new table
 *
 * Moves every entry of both the main fib_info hash and the local-address
 * hash into the new tables, then frees the old ones.  The whole rehash is
 * done under fib_info_lock so readers never see a half-moved table.
 */
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	/* New size must be published before rehashing: fib_info_hashfn()
	 * below uses fib_info_hash_size to compute the new bucket. */
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		/* _safe variant: entries are unlinked while iterating. */
		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);
			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);
			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;
	spin_unlock_bh(&fib_info_lock);

	/* NOTE(review): sizing uses sizeof(struct hlist_head *), matching
	 * the allocation site in fib_create_info(); the two must agree. */
	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}
/*
 * fib_info_update_nh_saddr - recompute the cached source address of a nexthop.
 *
 * Selects a source address for @nh based on its device, gateway and the
 * parent route's scope, and records the address generation id so callers
 * can detect when the cached value goes stale.  Returns the new saddr.
 */
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
	return nh->nh_saddr;
}
/*
 * fib_create_info - build (or reuse) a fib_info from a netlink route request.
 * @cfg: decoded route configuration
 *
 * Validates the request, grows the fib_info hash tables if needed,
 * allocates and fills a fib_info with its nexthops, resolves each
 * nexthop, and finally links the result into the hash tables.  If an
 * identical fib_info already exists it is reused (refcounted) and the
 * new one is discarded.
 *
 * Returns the (possibly shared) fib_info, or ERR_PTR(-errno).
 */
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	/* Grow the hash tables before they overflow; if allocation of the
	 * bigger tables fails we keep the old ones and only bail out when
	 * no table exists at all (fib_info_hash_size == 0). */
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	/* fib_nh array is allocated inline after the fib_info. */
	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		/* No private metrics: share the global default array. */
		fi->fib_metrics = (u32 *) dst_default_metrics;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		/* RTAX_* attributes are 1-based; index RTAX-1 into metrics. */
		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				if (type > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[type - 1] = nla_get_u32(nla);
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		/* Top-level oif/gw/flow, if given, must match nexthop 0. */
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		/* Multipath attribute given but kernel lacks support. */
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		/* Reject-style routes (unreachable, prohibit, ...) take
		 * no nexthop information at all. */
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		/* prefsrc must be one of our local addresses, except for a
		 * local route to itself (prefsrc == dst). */
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

link_it:
	/* Reuse an identical existing fib_info if there is one. */
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		/* free_fib_info() also releases metrics/devices as needed. */
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}
/*
 * fib_dump_info - fill a netlink message describing one route.
 *
 * Builds an RTM message for @fi into @skb.  Single-nexthop routes emit
 * flat RTA_GATEWAY/RTA_OIF attributes; multipath routes emit a nested
 * RTA_MULTIPATH attribute with one rtnexthop per path.
 *
 * Note: the NLA_PUT_* macros jump to nla_put_failure on overflow.
 * Returns the message length on success or -EMSGSIZE.
 */
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	/* rtm_table is only 8 bits; larger ids go in RTA_TABLE below. */
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len)
		NLA_PUT_BE32(skb, RTA_DST, dst);

	if (fi->fib_priority)
		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc)
		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);

		if (fi->fib_nh->nh_oif)
			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid)
			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			/* rtnh_hops is weight - 1 by rtnetlink convention. */
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw)
				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid)
				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/*
* Update FIB if:
* - local address disappeared -> we must delete all the entries
* referring to it.
* - device went down -> we must shutdown all nexthops going via it.
*/
/*
 * fib_sync_down_addr - a local address disappeared; kill routes using it
 * as their preferred source.
 * @net:   namespace to operate in
 * @local: the address that went away (0 means nothing to do)
 *
 * Marks every matching fib_info RTNH_F_DEAD and returns the number of
 * entries marked.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;

	/* Check the table pointer before indexing it: computing
	 * &fib_info_laddrhash[hash] on a NULL base is undefined behavior. */
	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	head = &fib_info_laddrhash[hash];
	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
/*
 * fib_sync_down_dev - a device went down; kill nexthops that use it.
 * @dev:   the device that changed state
 * @force: 0 = normal down (spare nexthops whose scope matches NOWHERE),
 *         1 = force all nexthops on @dev dead,
 *         >1 = device is unregistering; kill the whole fib_info if any
 *              of its nexthops uses @dev.
 *
 * Walks the per-device hash of nexthops; a fib_info whose nexthops are
 * all dead is itself marked RTNH_F_DEAD.  Returns the number of
 * fib_infos killed.
 */
int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;
	struct fib_nh *nh;

	if (force)
		scope = -1;	/* match (kill) every scope */

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		/* Multiple nexthops of one fib_info may hash to the same
		 * bucket; prev_fi skips a fib_info just processed. */
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				/* Remove this path's share of the weighted
				 * distribution budget. */
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			/* Unregistering device: the whole info dies. */
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}
/* Must be invoked inside of an RCU protected region. */
/*
 * fib_select_default - pick a default route among equal candidates.
 * @res: lookup result; res->fi may be replaced by the chosen route
 *
 * Walks the alias list of the matched prefix looking for usable
 * gateway routes of the same scope/priority, skipping dead gateways
 * (fib_detect_death).  tb_default caches the index of the route in use
 * so repeated failovers rotate through candidates.  Must be called
 * inside an RCU read-side critical section.
 */
void fib_select_default(struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		/* List is priority-ordered; worse routes end the scan. */
		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		/* Only link-scope gateway routes can be default routes. */
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	/* All candidates look dead: fall back to the last known-good one. */
	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	return;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/*
* Dead device goes up. We wake up dead nexthops.
 * It makes sense only on multipath routes.
*/
/*
 * fib_sync_up - a device came back up; revive nexthops that use it.
 * @dev: the device that came up
 *
 * Clears RTNH_F_DEAD on nexthops bound to @dev (and on the parent
 * fib_info once at least one nexthop is alive).  Returns the number of
 * fib_infos revived.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		/* Skip a fib_info already processed via another nexthop. */
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			/* Only revive paths on @dev, and only if it still
			 * has an inet configuration. */
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}
/*
* The algorithm is suboptimal, but it provides really
* fair weighted route distribution.
*/
/*
 * fib_select_multipath - choose a nexthop for a multipath route.
 * @res: lookup result; res->nh_sel is set to the chosen nexthop index
 *
 * Weighted round-robin: each alive nexthop is given nh_weight "power"
 * credits; fib_power is the total.  A pseudo-random point (jiffies mod
 * total) selects a path and one credit is consumed; when the pool is
 * exhausted it is refilled, so over time paths are used in proportion
 * to their weights.
 */
void fib_select_multipath(struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		/* Refill the credit pool from the nexthop weights. */
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}

	/* w should be random number [0..fi->fib_power-1],
	 * it is pretty bad approximation.
	 */
	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif
| gpl-2.0 |
admiralspark/NT-sparkkernel | drivers/misc/sgi-gru/grufault.c | 1291 | 23721 | /*
* SN Platform GRU Driver
*
* FAULT HANDLER FOR GRU DETECTED TLB MISSES
*
* This file contains code that handles TLB misses within the GRU.
* These misses are reported either via interrupts or user polling of
* the user CB.
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/* Return codes for vtop functions */
#define VTOP_SUCCESS 0
#define VTOP_INVALID -1
#define VTOP_RETRY -2
/*
* Test if a physical address is a valid GRU GSEG address
*/
/* True iff @paddr lies within the GRU GSEG physical address window. */
static inline int is_gru_paddr(unsigned long paddr)
{
	if (paddr < gru_start_paddr)
		return 0;
	return paddr < gru_end_paddr;
}
/*
* Find the vma of a GRU segment. Caller must hold mmap_sem.
*/
/*
 * Locate the VMA of a GRU segment containing @vaddr, or NULL if the
 * address is not inside a GRU mapping.  Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *found;

	found = find_vma(current->mm, vaddr);
	if (!found || found->vm_start > vaddr || found->vm_ops != &gru_vm_ops)
		return NULL;
	return found;
}
/*
* Find and lock the gts that contains the specified user vaddr.
*
* Returns:
* - *gts with the mmap_sem locked for read and the GTS locked.
* - NULL if vaddr invalid OR is not a valid GSEG vaddr.
*/
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct gru_thread_state *gts = NULL;
down_read(&mm->mmap_sem);
vma = gru_find_vma(vaddr);
if (vma)
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
if (gts)
mutex_lock(>s->ts_ctxlock);
else
up_read(&mm->mmap_sem);
return gts;
}
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct gru_thread_state *gts = ERR_PTR(-EINVAL);
down_write(&mm->mmap_sem);
vma = gru_find_vma(vaddr);
if (!vma)
goto err;
gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
if (IS_ERR(gts))
goto err;
mutex_lock(>s->ts_ctxlock);
downgrade_write(&mm->mmap_sem);
return gts;
err:
up_write(&mm->mmap_sem);
return gts;
}
/*
* Unlock a GTS that was previously locked with gru_find_lock_gts().
*/
static void gru_unlock_gts(struct gru_thread_state *gts)
{
mutex_unlock(>s->ts_ctxlock);
up_read(¤t->mm->mmap_sem);
}
/*
* Set a CB.istatus to active using a user virtual address. This must be done
* just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
* If the line is evicted, the status may be lost. The in-cache update
* is necessary to prevent the user from seeing a stale cb.istatus that will
* change as soon as the TFH restart is complete. Races may cause an
* occasional failure to clear the cb.istatus, but that is ok.
*/
/* Mark a user CB active in-cache; no-op for a NULL (kernel-context) CB. */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (!cbk)
		return;
	cbk->istatus = CBS_ACTIVE;
}
/*
* Read & clear a TFM
*
* The GRU has an array of fault maps. A map is private to a cpu
* Only one cpu will be accessing a cpu's fault map.
*
* This function scans the cpu-private fault map & clears all bits that
* are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
* the GRU, atomic operations must be used to clear bits.
*/
/*
 * get_clear_fault_map - atomically read-and-clear this cpu's TLB fault map.
 * @gru:  GRU owning the map
 * @imap: out: bits for pending TLB faults (interrupts)
 * @dmap: out: bits for completed async CBRs
 *
 * Uses xchg() per word because the GRU hardware may set bits
 * concurrently; a plain read-then-write could lose faults.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}
/*
* Atomic (interrupt context) & non-atomic (user context) functions to
* convert a vaddr into a physical address. The size of the page
* is returned in pageshift.
* returns:
* 0 - successful
* < 0 - error code
* 1 - (atomic only) try again in non-atomic context
*/
/*
 * non_atomic_pte_lookup - resolve vaddr -> physical address, may sleep.
 * @vma:       VMA containing @vaddr
 * @vaddr:     user virtual address
 * @write:     nonzero if a writable mapping is required
 * @paddr:     out: physical address of the page
 * @pageshift: out: log2 of the page size
 *
 * Uses get_user_pages(), which may fault the page in.  The page
 * reference is dropped immediately; only the physical address is kept.
 * Returns 0 on success, -EFAULT on failure.
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	if (get_user_pages
	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}
/*
* atomic_pte_lookup
*
* Convert a user virtual address to a physical address
* Only supports Intel large pages (2MB only) on x86_64.
* ZZZ - hugepage support is incomplete
*
* NOTE: mmap_sem is already held on entry to this function. This
* guarantees existence of the page tables.
*/
/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address by walking the
 * page tables directly (never sleeps, safe in interrupt context).
 * Only supports Intel large pages (2MB only) on x86_64.
 * ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 *
 * Returns 0 on success (paddr/pageshift filled in), 1 if the PTE is
 * absent or lacks the required permissions (caller falls back to the
 * non-atomic path).
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		/* Large page: the PMD entry itself is the PTE. */
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	/* A write access also requires the dirty bit to avoid the need
	 * for a dirtying fault later. */
	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}
/*
 * gru_vtop - translate a user vaddr to a GRU global physical address.
 * @gts:       thread state owning the address space
 * @vaddr:     user virtual address to translate
 * @write:     nonzero if write access is required
 * @atomic:    nonzero if sleeping is not allowed (interrupt context)
 * @gpa:       out: GRU global physical address
 * @pageshift: out: log2 of the page size
 *
 * Returns VTOP_SUCCESS, VTOP_INVALID (bad address), or VTOP_RETRY
 * (atomic lookup failed; retry from non-atomic/user context).
 */
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must/check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	/* A GSEG address must never be dropped into the GRU's own TLB. */
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);	/* align to page base */
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}
/*
* Flush a CBE from cache. The CBE is clean in the cache. Dirty the
* CBE cacheline so that the line will be written back to home agent.
* Otherwise the line may be silently dropped. This has no impact
* except on performance.
*/
/*
 * Flush a CBE from cache, first dirtying the line so it is written back
 * to the home agent instead of being silently dropped.  NULL is a no-op.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (likely(!cbe))
		return;
	cbe->cbrexecstatus = 0;		/* make CL dirty */
	gru_flush_cache(cbe);
}
/*
* Preload the TLB with entries that may be required. Currently, preloading
* is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
*/
/*
 * gru_preload_tlb - drop in TLB entries the BCOPY will need next.
 *
 * Walks backwards from the end of the transfer (or the preload limit)
 * toward the faulting page, issuing write-only TLB dropins.  Only BCOPY
 * operations are preloaded; the end address is derived from whichever
 * base address (baddr0/baddr1) matched the faulting address.
 */
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	/* Compute the last byte of the transfer on the faulting side. */
	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	/* Cap preloading at tlb_preload_count pages past the fault. */
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}
/*
* Drop a TLB entry into the GRU. The fault is described by info in an TFH.
* Input:
* cb Address of user CBR. Null if not running in user context
* Return:
* 0 = dropin, exception, or switch to UPM successful
* 1 = range invalidate active
* < 0 = error code
*
*/
/*
 * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
 * Input:
 *	cb	Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 *
 * Fix: the atomic_read() argument checking ms_range_active had been
 * text-corrupted ("&gts" mangled by HTML-entity decoding); restored to
 * &gts->ts_gms->ms_range_active.  No other behavior changed.
 */
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
	 * is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;

	/* Record a newly-seen page size; the CCH must be updated too. */
	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
		indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle  - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}
/*
* Process an external interrupt from the GRU. This interrupt is
* caused by a TLB miss.
* Note that this is the interrupt handler that is registered with linux
* interrupt handlers.
*/
/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with linux
 * interrupt handlers.
 *
 * Fix: the down_read_trylock()/up_read() arguments had been
 * text-corrupted ("&gts" mangled by HTML-entity decoding); restored to
 * &gts->ts_mm->mmap_sem.  No other behavior changed.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	struct completion *cmp;
	int cbrnum, ctxnum;

	STAT(intr);

	/* NOTE(review): &...->bs_grus[chiplet] can never be NULL; a NULL
	 * gru_base[blade] would fault before this check — preexisting. */
	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	/* Completed async CBRs: wake up any waiter. */
	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		cmp = gru->gs_blade->bs_async_wq;
		if (cmp)
			complete(cmp);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
	}

	/* Pending TLB faults: attempt a dropin for each. */
	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/* Spurious interrupts can cause this. Ignore. */
		if (!gts) {
			STAT(intr_spurious);
			continue;
		}

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}
/* Interrupt handler for chiplet 0 of the local blade's GRU. */
irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}
/* Interrupt handler for chiplet 1 of the local blade's GRU. */
irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}
/*
 * Interrupt handler covering blades that have no CPUs of their own:
 * blades with possible CPUs are skipped here because their faults are
 * serviced by the per-blade handlers above.
 */
irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;	/* handled by gru0_intr/gru1_intr */
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}
/*
 * gru_user_dropin - user-context TLB dropin with retry.
 *
 * Repeatedly waits until no range invalidate is active, then attempts
 * the dropin.  gru_try_dropin() returning > 0 means an invalidate raced
 * in; loop and wait again.  Returns <= 0 result from gru_try_dropin().
 */
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}
/*
* This interface is called as a result of a user detecting a "call OS" bit
* in a user CB. Normally means that a TLB fault has occurred.
* cb - user virtual address of the CB
*/
/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 * 	cb	- user virtual address of the CB
 *
 * Returns 0 on success, -EINVAL for a bad CB address, or the result of
 * the dropin attempt (-EAGAIN if the context is not loaded).
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	/* CB number must be within the CBRs allocated to this context. */
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	/* Only possible if the context is currently loaded in a GRU. */
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}
/*
* Fetch the exception detail information for a CB that terminated with
* an exception.
*/
/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception, copying it back to userspace via @arg.
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -EINVAL for a bad
 * CB number, -EAGAIN if the context is not currently loaded.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}
/*
* User request to unload a context. Content is saved for possible reload.
*/
/*
 * Unload every loaded GRU context on every GRU in the system, saving state
 * for possible later reload.  Requires CAP_SYS_ADMIN.
 *
 * Fix: the mutex_trylock()/mutex_unlock() calls referenced the garbled
 * token ">s->ts_ctxlock"; the correct operand is "&gts->ts_ctxlock".
 *
 * Returns 0 on success, -EPERM if the caller lacks CAP_SYS_ADMIN.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			/* skip contexts whose lock is contended; best effort */
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				/* drop gs_lock: unload may sleep */
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}
/*
 * User request to unload a context.  Content is saved for possible reload.
 * @arg: user pointer to a struct gru_unload_context_req; a zero .gseg
 *       means "unload all contexts" (admin-only).
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -EINVAL for an
 * unknown gseg address.
 */
int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	/* gseg == 0 is the "unload everything" request */
	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}
/*
* User request to flush a range of virtual addresses from the GRU TLB
* (Mainly for testing).
*/
/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (mainly for testing).
 * @arg: user pointer to a struct gru_flush_tlb_req (gseg, vaddr, len).
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -EINVAL for an
 * unknown gseg address.
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	/* gms is refcounted independently of gts, so the flush can run unlocked */
	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}
/*
 * Fetch GSEG statistics
 */
/*
 * Fetch per-GSEG usage statistics for a user context.
 * @arg: user pointer to a struct gru_get_gseg_statistics_req.
 *
 * Fix: the memcpy source referenced the garbled token ">s->ustats";
 * the correct operand is "&gts->ustats".
 *
 * Returns 0 on success, -EFAULT on a bad user pointer.
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		/* sizeof is unevaluated, so gts being NULL here is safe */
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}
/*
* Register the current task as the user of the GSEG slice.
* Needed for TLB fault interrupt targeting.
*/
/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 *
 * Fix: for sco_blade_chiplet, the original condition evaluated
 * gru_base[req.val1] before rejecting req.val1 == -1, reading one element
 * before the start of the gru_base[] array.  -1 is a legal value meaning
 * "any blade", so only index gru_base[] when req.val1 >= 0.
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -EINVAL for a bad
 * op or out-of-range values, or the gru_alloc_locked_gts() error.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		/* first touch of this gseg: allocate a context on the fly */
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/* Select blade/chiplet for GRU context; -1 means "any" */
		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1])) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}
| gpl-2.0 |
d8ahazard/shooter-cm9-deviltoast | drivers/usb/otg/twl4030-usb.c | 2315 | 19432 | /*
* twl4030_usb - TWL4030 USB transceiver, talking to OMAP OTG controller
*
* Copyright (C) 2004-2007 Texas Instruments
* Copyright (C) 2008 Nokia Corporation
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Current status:
* - HS USB ULPI mode works.
* - 3-pin mode support may be added in future.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/slab.h>
/* Register defines */
#define MCPC_CTRL 0x30
#define MCPC_CTRL_RTSOL (1 << 7)
#define MCPC_CTRL_EXTSWR (1 << 6)
#define MCPC_CTRL_EXTSWC (1 << 5)
#define MCPC_CTRL_VOICESW (1 << 4)
#define MCPC_CTRL_OUT64K (1 << 3)
#define MCPC_CTRL_RTSCTSSW (1 << 2)
#define MCPC_CTRL_HS_UART (1 << 0)
#define MCPC_IO_CTRL 0x33
#define MCPC_IO_CTRL_MICBIASEN (1 << 5)
#define MCPC_IO_CTRL_CTS_NPU (1 << 4)
#define MCPC_IO_CTRL_RXD_PU (1 << 3)
#define MCPC_IO_CTRL_TXDTYP (1 << 2)
#define MCPC_IO_CTRL_CTSTYP (1 << 1)
#define MCPC_IO_CTRL_RTSTYP (1 << 0)
#define MCPC_CTRL2 0x36
#define MCPC_CTRL2_MCPC_CK_EN (1 << 0)
#define OTHER_FUNC_CTRL 0x80
#define OTHER_FUNC_CTRL_BDIS_ACON_EN (1 << 4)
#define OTHER_FUNC_CTRL_FIVEWIRE_MODE (1 << 2)
#define OTHER_IFC_CTRL 0x83
#define OTHER_IFC_CTRL_OE_INT_EN (1 << 6)
#define OTHER_IFC_CTRL_CEA2011_MODE (1 << 5)
#define OTHER_IFC_CTRL_FSLSSERIALMODE_4PIN (1 << 4)
#define OTHER_IFC_CTRL_HIZ_ULPI_60MHZ_OUT (1 << 3)
#define OTHER_IFC_CTRL_HIZ_ULPI (1 << 2)
#define OTHER_IFC_CTRL_ALT_INT_REROUTE (1 << 0)
#define OTHER_INT_EN_RISE 0x86
#define OTHER_INT_EN_FALL 0x89
#define OTHER_INT_STS 0x8C
#define OTHER_INT_LATCH 0x8D
#define OTHER_INT_VB_SESS_VLD (1 << 7)
#define OTHER_INT_DM_HI (1 << 6) /* not valid for "latch" reg */
#define OTHER_INT_DP_HI (1 << 5) /* not valid for "latch" reg */
#define OTHER_INT_BDIS_ACON (1 << 3) /* not valid for "fall" regs */
#define OTHER_INT_MANU (1 << 1)
#define OTHER_INT_ABNORMAL_STRESS (1 << 0)
#define ID_STATUS 0x96
#define ID_RES_FLOAT (1 << 4)
#define ID_RES_440K (1 << 3)
#define ID_RES_200K (1 << 2)
#define ID_RES_102K (1 << 1)
#define ID_RES_GND (1 << 0)
#define POWER_CTRL 0xAC
#define POWER_CTRL_OTG_ENAB (1 << 5)
#define OTHER_IFC_CTRL2 0xAF
#define OTHER_IFC_CTRL2_ULPI_STP_LOW (1 << 4)
#define OTHER_IFC_CTRL2_ULPI_TXEN_POL (1 << 3)
#define OTHER_IFC_CTRL2_ULPI_4PIN_2430 (1 << 2)
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_MASK (3 << 0) /* bits 0 and 1 */
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT1N (0 << 0)
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT2N (1 << 0)
#define REG_CTRL_EN 0xB2
#define REG_CTRL_ERROR 0xB5
#define ULPI_I2C_CONFLICT_INTEN (1 << 0)
#define OTHER_FUNC_CTRL2 0xB8
#define OTHER_FUNC_CTRL2_VBAT_TIMER_EN (1 << 0)
/* following registers do not have separate _clr and _set registers */
#define VBUS_DEBOUNCE 0xC0
#define ID_DEBOUNCE 0xC1
#define VBAT_TIMER 0xD3
#define PHY_PWR_CTRL 0xFD
#define PHY_PWR_PHYPWD (1 << 0)
#define PHY_CLK_CTRL 0xFE
#define PHY_CLK_CTRL_CLOCKGATING_EN (1 << 2)
#define PHY_CLK_CTRL_CLK32K_EN (1 << 1)
#define REQ_PHY_DPLL_CLK (1 << 0)
#define PHY_CLK_CTRL_STS 0xFF
#define PHY_DPLL_CLK (1 << 0)
/* In module TWL4030_MODULE_PM_MASTER */
#define STS_HW_CONDITIONS 0x0F
/* In module TWL4030_MODULE_PM_RECEIVER */
#define VUSB_DEDICATED1 0x7D
#define VUSB_DEDICATED2 0x7E
#define VUSB1V5_DEV_GRP 0x71
#define VUSB1V5_TYPE 0x72
#define VUSB1V5_REMAP 0x73
#define VUSB1V8_DEV_GRP 0x74
#define VUSB1V8_TYPE 0x75
#define VUSB1V8_REMAP 0x76
#define VUSB3V1_DEV_GRP 0x77
#define VUSB3V1_TYPE 0x78
#define VUSB3V1_REMAP 0x79
/* In module TWL4030_MODULE_INTBR */
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
/* Per-device driver state for the TWL4030 USB transceiver */
struct twl4030_usb {
	struct otg_transceiver	otg;		/* must stay first for xceiv_to_twl */
	struct device		*dev;

	/* TWL4030 internal USB regulator supplies */
	struct regulator	*usb1v5;
	struct regulator	*usb1v8;
	struct regulator	*usb3v1;

	/* for vbus reporting with irqs disabled */
	spinlock_t		lock;

	/* pin configuration */
	enum twl4030_usb_mode	usb_mode;

	int			irq;
	u8			linkstat;	/* last USB_EVENT_* seen */
	bool			vbus_supplied;
	u8			asleep;		/* nonzero while PHY is powered down */
	bool			irq_enabled;
};

/*
 * internal define on top of container_of
 * Fix: the original macro expansion ended with a stray semicolon, which
 * breaks any use of the macro inside a larger expression.
 */
#define xceiv_to_twl(x)		container_of((x), struct twl4030_usb, otg)
/*-------------------------------------------------------------------------*/
/*
 * Write a byte over I2C and read it back to verify; retry once on mismatch.
 *
 * Fix: the dev_dbg arguments were swapped — the read-back value was logged
 * as "wrote" and the written value as "read".  Pass (data, check) so the
 * message matches reality.
 *
 * Returns 0 when the read-back matches, -EBUSY after two failed attempts.
 */
static int twl4030_i2c_write_u8_verify(struct twl4030_usb *twl,
		u8 module, u8 data, u8 address)
{
	u8 check;

	if ((twl_i2c_write_u8(module, data, address) >= 0) &&
	    (twl_i2c_read_u8(module, &check, address) >= 0) &&
	    (check == data))
		return 0;
	dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
		1, module, address, data, check);

	/* Failed once: Try again */
	if ((twl_i2c_write_u8(module, data, address) >= 0) &&
	    (twl_i2c_read_u8(module, &check, address) >= 0) &&
	    (check == data))
		return 0;
	dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
		2, module, address, data, check);

	/* Failed again: Return error */
	return -EBUSY;
}
/* Verified write to the USB module (write, read back, compare) */
#define twl4030_usb_write_verify(twl, address, data)	\
	twl4030_i2c_write_u8_verify(twl, TWL4030_MODULE_USB, (data), (address))

/*
 * Unverified write of one byte to a TWL4030 USB-module register.
 * Returns the twl_i2c_write_u8 result (negative on error).
 */
static inline int twl4030_usb_write(struct twl4030_usb *twl,
		u8 address, u8 data)
{
	int ret = 0;

	ret = twl_i2c_write_u8(TWL4030_MODULE_USB, data, address);
	if (ret < 0)
		dev_dbg(twl->dev,
			"TWL4030:USB:Write[0x%x] Error %d\n", address, ret);
	return ret;
}

/*
 * Read one byte from an arbitrary TWL4030 module register.
 * Returns the byte value (0..255) on success, negative error code on failure.
 */
static inline int twl4030_readb(struct twl4030_usb *twl, u8 module, u8 address)
{
	u8 data;
	int ret = 0;

	ret = twl_i2c_read_u8(module, &data, address);
	if (ret >= 0)
		ret = data;
	else
		dev_dbg(twl->dev,
			"TWL4030:readb[0x%x,0x%x] Error %d\n",
			module, address, ret);

	return ret;
}

/* Read one byte from a USB-module register */
static inline int twl4030_usb_read(struct twl4030_usb *twl, u8 address)
{
	return twl4030_readb(twl, TWL4030_MODULE_USB, address);
}

/*-------------------------------------------------------------------------*/

/* Set bits via the ULPI "set" shadow register for @reg */
static inline int
twl4030_usb_set_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
	return twl4030_usb_write(twl, ULPI_SET(reg), bits);
}

/* Clear bits via the ULPI "clear" shadow register for @reg */
static inline int
twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
	return twl4030_usb_write(twl, ULPI_CLR(reg), bits);
}
/*-------------------------------------------------------------------------*/
/*
 * Sample STS_HW_CONDITIONS and derive the current USB link event
 * (USB_EVENT_NONE / USB_EVENT_VBUS / USB_EVENT_ID), updating
 * twl->vbus_supplied, twl->linkstat and the OTG default_a/state fields.
 * Returns the derived link event (a non-negative enum value even when the
 * register read itself failed).
 */
static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
{
	int	status;
	int	linkstat = USB_EVENT_NONE;

	twl->vbus_supplied = false;

	/*
	 * For ID/VBUS sensing, see manual section 15.4.8 ...
	 * except when using only battery backup power, two
	 * comparators produce VBUS_PRES and ID_PRES signals,
	 * which don't match docs elsewhere.  But ... BIT(7)
	 * and BIT(2) of STS_HW_CONDITIONS, respectively, do
	 * seem to match up.  If either is true the USB_PRES
	 * signal is active, the OTG module is activated, and
	 * its interrupt may be raised (may wake the system).
	 */
	status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER,
			STS_HW_CONDITIONS);
	if (status < 0)
		dev_err(twl->dev, "USB link status err %d\n", status);
	else if (status & (BIT(7) | BIT(2))) {
		if (status & (BIT(7)))	/* VBUS present */
			twl->vbus_supplied = true;

		if (status & BIT(2))	/* ID grounded: we are the A-device */
			linkstat = USB_EVENT_ID;
		else
			linkstat = USB_EVENT_VBUS;
	} else
		linkstat = USB_EVENT_NONE;

	dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
			status, status, linkstat);

	twl->otg.last_event = linkstat;

	/* REVISIT this assumes host and peripheral controllers
	 * are registered, and that both are active...
	 */
	spin_lock_irq(&twl->lock);
	twl->linkstat = linkstat;
	if (linkstat == USB_EVENT_ID) {
		twl->otg.default_a = true;
		twl->otg.state = OTG_STATE_A_IDLE;
	} else {
		twl->otg.default_a = false;
		twl->otg.state = OTG_STATE_B_IDLE;
	}
	spin_unlock_irq(&twl->lock);

	return linkstat;
}
/*
 * Configure the transceiver pin mode.  T2_USB_MODE_ULPI programs ULPI
 * operation; -1 is a placeholder for power-on defaults (not implemented).
 *
 * Fix: removed the stray semicolon after the switch's closing brace
 * (an empty statement flagged by -Wpedantic).
 */
static void twl4030_usb_set_mode(struct twl4030_usb *twl, int mode)
{
	twl->usb_mode = mode;

	switch (mode) {
	case T2_USB_MODE_ULPI:
		twl4030_usb_clear_bits(twl, ULPI_IFC_CTRL,
					ULPI_IFC_CTRL_CARKITMODE);
		twl4030_usb_set_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
		twl4030_usb_clear_bits(twl, ULPI_FUNC_CTRL,
					ULPI_FUNC_CTRL_XCVRSEL_MASK |
					ULPI_FUNC_CTRL_OPMODE_MASK);
		break;
	case -1:
		/* FIXME: power on defaults */
		break;
	default:
		dev_err(twl->dev, "unsupported T2 transceiver mode %d\n",
				mode);
		break;
	}
}
/*
 * Gate I2C access to the PHY registers.  When @on, request the PHY DPLL
 * clock (needed for register access) and poll up to 1 s for it to lock;
 * when !@on, hand DPLL clock control back to ULPI.
 */
static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
{
	unsigned long timeout;
	int val = twl4030_usb_read(twl, PHY_CLK_CTRL);

	if (val >= 0) {
		if (on) {
			/* enable DPLL to access PHY registers over I2C */
			val |= REQ_PHY_DPLL_CLK;
			WARN_ON(twl4030_usb_write_verify(twl, PHY_CLK_CTRL,
						(u8)val) < 0);

			/* poll for DPLL lock, bounded by a 1 s timeout */
			timeout = jiffies + HZ;
			while (!(twl4030_usb_read(twl, PHY_CLK_CTRL_STS) &
							PHY_DPLL_CLK)
				&& time_before(jiffies, timeout))
				udelay(10);
			if (!(twl4030_usb_read(twl, PHY_CLK_CTRL_STS) &
							PHY_DPLL_CLK))
				dev_err(twl->dev, "Timeout setting T2 HSUSB "
						"PHY DPLL clock\n");
		} else {
			/* let ULPI control the DPLL clock */
			val &= ~REQ_PHY_DPLL_CLK;
			WARN_ON(twl4030_usb_write_verify(twl, PHY_CLK_CTRL,
						(u8)val) < 0);
		}
	}
}
/*
 * Toggle the PHY power-down bit.
 * NOTE(review): twl4030_usb_read() can return a negative error which is
 * silently truncated into the u8 here — verify this is acceptable.
 */
static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
{
	u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);

	if (on)
		pwr &= ~PHY_PWR_PHYPWD;
	else
		pwr |= PHY_PWR_PHYPWD;
	WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
}

/*
 * Power the PHY up (enable its three supplies, power it, enable clock
 * gating and the 32 kHz clock) or down (reverse order).
 */
static void twl4030_phy_power(struct twl4030_usb *twl, int on)
{
	if (on) {
		regulator_enable(twl->usb3v1);
		regulator_enable(twl->usb1v8);
		/*
		 * Disabling usb3v1 regulator (= writing 0 to VUSB3V1_DEV_GRP
		 * in twl4030) resets the VUSB_DEDICATED2 register. This reset
		 * enables VUSB3V1_SLEEP bit that remaps usb3v1 ACTIVE state to
		 * SLEEP. We work around this by clearing the bit after usv3v1
		 * is re-activated. This ensures that VUSB3V1 is really active.
		 */
		twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
							VUSB_DEDICATED2);
		regulator_enable(twl->usb1v5);
		__twl4030_phy_power(twl, 1);
		twl4030_usb_write(twl, PHY_CLK_CTRL,
				  twl4030_usb_read(twl, PHY_CLK_CTRL) |
					(PHY_CLK_CTRL_CLOCKGATING_EN |
						PHY_CLK_CTRL_CLK32K_EN));
	} else {
		__twl4030_phy_power(twl, 0);
		regulator_disable(twl->usb1v5);
		regulator_disable(twl->usb1v8);
		regulator_disable(twl->usb3v1);
	}
}

/* Power the PHY down unless it is already asleep */
static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
{
	if (twl->asleep)
		return;

	twl4030_phy_power(twl, 0);
	twl->asleep = 1;
	dev_dbg(twl->dev, "%s\n", __func__);
}

/*
 * Power the PHY up and reprogram its mode.  ULPI mode needs the I2C/DPLL
 * window only while writing; release it afterwards.
 */
static void __twl4030_phy_resume(struct twl4030_usb *twl)
{
	twl4030_phy_power(twl, 1);
	twl4030_i2c_access(twl, 1);
	twl4030_usb_set_mode(twl, twl->usb_mode);
	if (twl->usb_mode == T2_USB_MODE_ULPI)
		twl4030_i2c_access(twl, 0);
}

/* Resume the PHY unless it is already awake */
static void twl4030_phy_resume(struct twl4030_usb *twl)
{
	if (!twl->asleep)
		return;
	__twl4030_phy_resume(twl);
	twl->asleep = 0;
	dev_dbg(twl->dev, "%s\n", __func__);
}
/*
 * Acquire and initialize the three internal USB LDO regulators
 * (3.1 V, 1.5 V, 1.8 V).  On failure any regulator already acquired
 * is released.  Returns 0 on success or -ENODEV.
 */
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
{
	/* Enable writing to power configuration registers */
	twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
			 TWL4030_PM_MASTER_KEY_CFG1,
			 TWL4030_PM_MASTER_PROTECT_KEY);

	twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
			 TWL4030_PM_MASTER_KEY_CFG2,
			 TWL4030_PM_MASTER_PROTECT_KEY);

	/* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
	/*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/

	/* input to VUSB3V1 LDO is from VBAT, not VBUS */
	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);

	/* Initialize 3.1V regulator */
	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP);

	twl->usb3v1 = regulator_get(twl->dev, "usb3v1");
	if (IS_ERR(twl->usb3v1))
		return -ENODEV;

	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE);

	/* Initialize 1.5V regulator */
	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP);

	twl->usb1v5 = regulator_get(twl->dev, "usb1v5");
	if (IS_ERR(twl->usb1v5))
		goto fail1;

	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE);

	/* Initialize 1.8V regulator */
	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP);

	twl->usb1v8 = regulator_get(twl->dev, "usb1v8");
	if (IS_ERR(twl->usb1v8))
		goto fail2;

	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE);

	/* disable access to power configuration registers */
	twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
			 TWL4030_PM_MASTER_PROTECT_KEY);

	return 0;

	/* goto-based unwind: release in reverse order of acquisition */
fail2:
	regulator_put(twl->usb1v5);
	twl->usb1v5 = NULL;
fail1:
	regulator_put(twl->usb3v1);
	twl->usb3v1 = NULL;
	return -ENODEV;
}
/* sysfs "vbus" attribute: report "on"/"off" under the vbus-reporting lock */
static ssize_t twl4030_usb_vbus_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct twl4030_usb *twl = dev_get_drvdata(dev);
	unsigned long flags;
	int len;

	spin_lock_irqsave(&twl->lock, flags);
	len = sprintf(buf, "%s\n", twl->vbus_supplied ? "on" : "off");
	spin_unlock_irqrestore(&twl->lock, flags);

	return len;
}
static DEVICE_ATTR(vbus, 0444, twl4030_usb_vbus_show, NULL);
/*
 * Threaded IRQ handler: re-sample the link state, suspend/resume the PHY
 * accordingly, notify OTG listeners, and poke the sysfs "vbus" attribute.
 */
static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
{
	struct twl4030_usb *twl = _twl;
	int status;

	status = twl4030_usb_linkstat(twl);
	if (status >= 0) {
		/* FIXME add a set_power() method so that B-devices can
		 * configure the charger appropriately.  It's not always
		 * correct to consume VBUS power, and how much current to
		 * consume is a function of the USB configuration chosen
		 * by the host.
		 *
		 * REVISIT usb_gadget_vbus_connect(...) as needed, ditto
		 * its disconnect() sibling, when changing to/from the
		 * USB_LINK_VBUS state.  musb_hdrc won't care until it
		 * starts to handle softconnect right.
		 */
		if (status == USB_EVENT_NONE)
			twl4030_phy_suspend(twl, 0);
		else
			twl4030_phy_resume(twl);

		atomic_notifier_call_chain(&twl->otg.notifier, status,
				twl->otg.gadget);
	}
	sysfs_notify(&twl->dev->kobj, NULL, "vbus");

	return IRQ_HANDLED;
}

/*
 * One-shot initialization at probe: bring the PHY to the state matching
 * the current link (powered down when nothing is connected), then notify
 * listeners.  Uses the __-prefixed helpers because twl->asleep has not
 * been synchronized with hardware yet.
 */
static void twl4030_usb_phy_init(struct twl4030_usb *twl)
{
	int status;

	status = twl4030_usb_linkstat(twl);
	if (status >= 0) {
		if (status == USB_EVENT_NONE) {
			__twl4030_phy_power(twl, 0);
			twl->asleep = 1;
		} else {
			__twl4030_phy_resume(twl);
			twl->asleep = 0;
		}

		atomic_notifier_call_chain(&twl->otg.notifier, status,
				twl->otg.gadget);
	}
	sysfs_notify(&twl->dev->kobj, NULL, "vbus");
}
static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
{
struct twl4030_usb *twl = xceiv_to_twl(x);
if (suspend)
twl4030_phy_suspend(twl, 1);
else
twl4030_phy_resume(twl);
return 0;
}
static int twl4030_set_peripheral(struct otg_transceiver *x,
struct usb_gadget *gadget)
{
struct twl4030_usb *twl;
if (!x)
return -ENODEV;
twl = xceiv_to_twl(x);
twl->otg.gadget = gadget;
if (!gadget)
twl->otg.state = OTG_STATE_UNDEFINED;
return 0;
}
static int twl4030_set_host(struct otg_transceiver *x, struct usb_bus *host)
{
struct twl4030_usb *twl;
if (!x)
return -ENODEV;
twl = xceiv_to_twl(x);
twl->otg.host = host;
if (!host)
twl->otg.state = OTG_STATE_UNDEFINED;
return 0;
}
/*
 * Platform probe: allocate state, acquire regulators, register the OTG
 * transceiver, create the "vbus" sysfs file, request the link-change IRQ,
 * and put the PHY into a state matching the current link.
 *
 * Fix: the request_threaded_irq() failure path used to leak the three
 * regulators, the sysfs attribute, and the registered transceiver; it
 * now unwinds everything done after twl4030_usb_ldo_init() succeeded.
 */
static int __devinit twl4030_usb_probe(struct platform_device *pdev)
{
	struct twl4030_usb_data *pdata = pdev->dev.platform_data;
	struct twl4030_usb	*twl;
	int			status, err;

	if (!pdata) {
		dev_dbg(&pdev->dev, "platform_data not available\n");
		return -EINVAL;
	}

	twl = kzalloc(sizeof *twl, GFP_KERNEL);
	if (!twl)
		return -ENOMEM;

	twl->dev		= &pdev->dev;
	twl->irq		= platform_get_irq(pdev, 0);
	twl->otg.dev		= twl->dev;
	twl->otg.label		= "twl4030";
	twl->otg.set_host	= twl4030_set_host;
	twl->otg.set_peripheral	= twl4030_set_peripheral;
	twl->otg.set_suspend	= twl4030_set_suspend;
	twl->usb_mode		= pdata->usb_mode;
	twl->vbus_supplied	= false;
	twl->asleep		= 1;

	/* init spinlock for workqueue */
	spin_lock_init(&twl->lock);

	err = twl4030_usb_ldo_init(twl);
	if (err) {
		dev_err(&pdev->dev, "ldo init failed\n");
		kfree(twl);
		return err;
	}
	otg_set_transceiver(&twl->otg);

	platform_set_drvdata(pdev, twl);
	if (device_create_file(&pdev->dev, &dev_attr_vbus))
		dev_warn(&pdev->dev, "could not create sysfs file\n");

	ATOMIC_INIT_NOTIFIER_HEAD(&twl->otg.notifier);

	/* Our job is to use irqs and status from the power module
	 * to keep the transceiver disabled when nothing's connected.
	 *
	 * FIXME we actually shouldn't start enabling it until the
	 * USB controller drivers have said they're ready, by calling
	 * set_host() and/or set_peripheral() ... OTG_capable boards
	 * need both handles, otherwise just one suffices.
	 */
	twl->irq_enabled = true;
	status = request_threaded_irq(twl->irq, NULL, twl4030_usb_irq,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
			"twl4030_usb", twl);
	if (status < 0) {
		dev_dbg(&pdev->dev, "can't get IRQ %d, err %d\n",
			twl->irq, status);
		/* undo everything done since ldo init succeeded */
		device_remove_file(twl->dev, &dev_attr_vbus);
		otg_set_transceiver(NULL);
		regulator_put(twl->usb1v5);
		regulator_put(twl->usb1v8);
		regulator_put(twl->usb3v1);
		kfree(twl);
		return status;
	}

	/* Power down phy or make it work according to
	 * current link state.
	 */
	twl4030_usb_phy_init(twl);

	dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
	return 0;
}
/*
 * Platform remove: release the IRQ and sysfs file, restore transceiver
 * power-on defaults, disable the OTG block, power the PHY down if still
 * up, and release the regulators.
 */
static int __exit twl4030_usb_remove(struct platform_device *pdev)
{
	struct twl4030_usb *twl = platform_get_drvdata(pdev);
	int val;

	free_irq(twl->irq, twl);
	device_remove_file(twl->dev, &dev_attr_vbus);

	/* set transceiver mode to power on defaults */
	twl4030_usb_set_mode(twl, -1);

	/* autogate 60MHz ULPI clock,
	 * clear dpll clock request for i2c access,
	 * disable 32KHz
	 */
	val = twl4030_usb_read(twl, PHY_CLK_CTRL);
	if (val >= 0) {
		val |= PHY_CLK_CTRL_CLOCKGATING_EN;
		val &= ~(PHY_CLK_CTRL_CLK32K_EN | REQ_PHY_DPLL_CLK);
		twl4030_usb_write(twl, PHY_CLK_CTRL, (u8)val);
	}

	/* disable complete OTG block */
	twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);

	if (!twl->asleep)
		twl4030_phy_power(twl, 0);
	regulator_put(twl->usb1v5);
	regulator_put(twl->usb1v8);
	regulator_put(twl->usb3v1);

	kfree(twl);

	return 0;
}
/* Platform driver glue; bound by name to the "twl4030_usb" device */
static struct platform_driver twl4030_usb_driver = {
	.probe		= twl4030_usb_probe,
	.remove		= __exit_p(twl4030_usb_remove),
	.driver		= {
		.name	= "twl4030_usb",
		.owner	= THIS_MODULE,
	},
};

/* Registered at subsys_initcall time so the transceiver exists before
 * the USB controller drivers probe. */
static int __init twl4030_usb_init(void)
{
	return platform_driver_register(&twl4030_usb_driver);
}
subsys_initcall(twl4030_usb_init);

static void __exit twl4030_usb_exit(void)
{
	platform_driver_unregister(&twl4030_usb_driver);
}
module_exit(twl4030_usb_exit);
MODULE_ALIAS("platform:twl4030_usb");
MODULE_AUTHOR("Texas Instruments, Inc, Nokia Corporation");
MODULE_DESCRIPTION("TWL4030 USB transceiver driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
alexax66/KK_kernel_serranove3g | drivers/media/rc/ir-rc6-decoder.c | 2571 | 7413 | /* ir-rc6-decoder.c - A decoder for the RC6 IR protocol
*
* Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "rc-core-priv.h"
#include <linux/module.h>
/*
* This decoder currently supports:
* RC6-0-16 (standard toggle bit in header)
* RC6-6A-20 (no toggle bit)
* RC6-6A-24 (no toggle bit)
* RC6-6A-32 (MCE version with toggle bit in body)
*/
#define RC6_UNIT 444444 /* nanosecs */
#define RC6_HEADER_NBITS 4 /* not including toggle bit */
#define RC6_0_NBITS 16
#define RC6_6A_32_NBITS 32
#define RC6_6A_NBITS 128 /* Variable 8..128 */
#define RC6_PREFIX_PULSE (6 * RC6_UNIT)
#define RC6_PREFIX_SPACE (2 * RC6_UNIT)
#define RC6_BIT_START (1 * RC6_UNIT)
#define RC6_BIT_END (1 * RC6_UNIT)
#define RC6_TOGGLE_START (2 * RC6_UNIT)
#define RC6_TOGGLE_END (2 * RC6_UNIT)
#define RC6_SUFFIX_SPACE (6 * RC6_UNIT)
#define RC6_MODE_MASK 0x07 /* for the header bits */
#define RC6_STARTBIT_MASK 0x08 /* for the header bits */
#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
#define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */
#define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */
#ifndef CHAR_BIT
#define CHAR_BIT 8 /* Normally in <limits.h> */
#endif
/* RC6 protocol variants distinguished by the mode field in the header */
enum rc6_mode {
	RC6_MODE_0,
	RC6_MODE_6A,
	RC6_MODE_UNKNOWN,
};

/* States of the RC6 pulse/space decoding state machine */
enum rc6_state {
	STATE_INACTIVE,		/* waiting for the leader pulse */
	STATE_PREFIX_SPACE,
	STATE_HEADER_BIT_START,
	STATE_HEADER_BIT_END,
	STATE_TOGGLE_START,	/* double-width toggle bit */
	STATE_TOGGLE_END,
	STATE_BODY_BIT_START,
	STATE_BODY_BIT_END,
	STATE_FINISHED,
};
static enum rc6_mode rc6_mode(struct rc6_dec *data)
{
switch (data->header & RC6_MODE_MASK) {
case 0:
return RC6_MODE_0;
case 6:
if (!data->toggle)
return RC6_MODE_6A;
/* fall through */
default:
return RC6_MODE_UNKNOWN;
}
}
/**
* ir_rc6_decode() - Decode one RC6 pulse or space
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
/**
 * ir_rc6_decode() - Decode one RC6 pulse or space
 * @dev: the struct rc_dev descriptor of the device
 * @ev: the struct ir_raw_event descriptor of the pulse/space
 *
 * Advances the per-device RC6 state machine by one pulse/space event.
 * A single long event may carry several bit transitions, hence the
 * "again" loop that consumes RC6_UNIT-sized slices of it.
 *
 * This function returns -EINVAL if the pulse violates the state machine
 */
static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
	struct rc6_dec *data = &dev->raw->rc6;
	u32 scancode;
	u8 toggle;

	/* nothing to do unless some RC6 variant is enabled */
	if (!(dev->enabled_protocols &
	      (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
	       RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)))
		return 0;

	if (!is_timing_event(ev)) {
		if (ev.reset)
			data->state = STATE_INACTIVE;
		return 0;
	}

	/* shorter than half a unit: noise, abort the current decode */
	if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
		goto out;

again:
	IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n",
		   data->state, TO_US(ev.duration), TO_STR(ev.pulse));

	/* remaining slice too short to carry another bit: done for now */
	if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
		return 0;

	switch (data->state) {

	case STATE_INACTIVE:
		if (!ev.pulse)
			break;
		/* Note: larger margin on first pulse since each RC6_UNIT
		   is quite short and some hardware takes some time to
		   adjust to the signal */
		if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT))
			break;
		data->state = STATE_PREFIX_SPACE;
		data->count = 0;
		return 0;

	case STATE_PREFIX_SPACE:
		if (ev.pulse)
			break;
		if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2))
			break;
		data->state = STATE_HEADER_BIT_START;
		data->header = 0;
		return 0;

	case STATE_HEADER_BIT_START:
		if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
			break;
		/* Manchester: first half-period level gives the bit value */
		data->header <<= 1;
		if (ev.pulse)
			data->header |= 1;
		data->count++;
		data->state = STATE_HEADER_BIT_END;
		return 0;

	case STATE_HEADER_BIT_END:
		if (!is_transition(&ev, &dev->raw->prev_ev))
			break;
		if (data->count == RC6_HEADER_NBITS)
			data->state = STATE_TOGGLE_START;
		else
			data->state = STATE_HEADER_BIT_START;
		/* consume the second half-period, keep any leftover */
		decrease_duration(&ev, RC6_BIT_END);
		goto again;

	case STATE_TOGGLE_START:
		if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2))
			break;
		data->toggle = ev.pulse;
		data->state = STATE_TOGGLE_END;
		return 0;

	case STATE_TOGGLE_END:
		if (!is_transition(&ev, &dev->raw->prev_ev) ||
		    !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2))
			break;

		if (!(data->header & RC6_STARTBIT_MASK)) {
			IR_dprintk(1, "RC6 invalid start bit\n");
			break;
		}

		data->state = STATE_BODY_BIT_START;
		decrease_duration(&ev, RC6_TOGGLE_END);
		data->count = 0;
		data->body = 0;

		/* body length depends on the protocol variant */
		switch (rc6_mode(data)) {
		case RC6_MODE_0:
			data->wanted_bits = RC6_0_NBITS;
			break;
		case RC6_MODE_6A:
			data->wanted_bits = RC6_6A_NBITS;
			break;
		default:
			IR_dprintk(1, "RC6 unknown mode\n");
			goto out;
		}
		goto again;

	case STATE_BODY_BIT_START:
		if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) {
			/* Discard LSB's that won't fit in data->body */
			if (data->count++ < CHAR_BIT * sizeof data->body) {
				data->body <<= 1;
				if (ev.pulse)
					data->body |= 1;
			}
			data->state = STATE_BODY_BIT_END;
			return 0;
		} else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse &&
				geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) {
			/* RC6-6A has variable length; suffix space ends it */
			data->state = STATE_FINISHED;
			goto again;
		}
		break;

	case STATE_BODY_BIT_END:
		if (!is_transition(&ev, &dev->raw->prev_ev))
			break;

		if (data->count == data->wanted_bits)
			data->state = STATE_FINISHED;
		else
			data->state = STATE_BODY_BIT_START;

		decrease_duration(&ev, RC6_BIT_END);
		goto again;

	case STATE_FINISHED:
		if (ev.pulse)
			break;

		switch (rc6_mode(data)) {
		case RC6_MODE_0:
			scancode = data->body;
			toggle = data->toggle;
			IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n",
				   scancode, toggle);
			break;
		case RC6_MODE_6A:
			if (data->count > CHAR_BIT * sizeof data->body) {
				IR_dprintk(1, "RC6 too many (%u) data bits\n",
					   data->count);
				goto out;
			}

			scancode = data->body;
			if (data->count == RC6_6A_32_NBITS &&
			    (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
				/* MCE RC */
				toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 1 : 0;
				scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
			} else {
				toggle = 0;
			}
			IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n",
				   scancode, toggle);
			break;
		default:
			IR_dprintk(1, "RC6 unknown mode\n");
			goto out;
		}

		rc_keydown(dev, scancode, toggle);
		data->state = STATE_INACTIVE;
		return 0;
	}

out:
	IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n",
		   data->state, TO_US(ev.duration), TO_STR(ev.pulse));
	data->state = STATE_INACTIVE;
	return -EINVAL;
}
/* Raw-IR handler descriptor registered with the rc-core framework */
static struct ir_raw_handler rc6_handler = {
	.protocols	= RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
			  RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
			  RC_BIT_RC6_MCE,
	.decode		= ir_rc6_decode,
};

static int __init ir_rc6_decode_init(void)
{
	ir_raw_handler_register(&rc6_handler);

	printk(KERN_INFO "IR RC6 protocol handler initialized\n");
	return 0;
}

static void __exit ir_rc6_decode_exit(void)
{
	ir_raw_handler_unregister(&rc6_handler);
}

module_init(ir_rc6_decode_init);
module_exit(ir_rc6_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_DESCRIPTION("RC6 IR protocol decoder");
| gpl-2.0 |
johnhubbard/pnotify-linux-3.0.52 | drivers/hwmon/w83793.c | 3083 | 62075 | /*
w83793.c - Linux kernel driver for hardware monitoring
Copyright (C) 2006 Winbond Electronics Corp.
Yuan Mu
Rudolf Marek <r.marek@assembler.cz>
Copyright (C) 2009-2010 Sven Anders <anders@anduras.de>, ANDURAS AG.
Watchdog driver part
(Based partially on fschmd driver,
Copyright 2007-2008 by Hans de Goede)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation - version 2.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
*/
/*
Supports following chips:
Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
w83793 10 12 8 6 0x7b 0x5ca3 yes no
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kref.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
/* Default values */
#define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
static int reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
static int timeout = WATCHDOG_TIMEOUT; /* default timeout in minutes */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in minutes. 2<= timeout <=255 (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
Address 0x00, 0x0d, 0x0e, 0x0f in all three banks are reserved
as ID, Bank Select registers
*/
#define W83793_REG_BANKSEL 0x00
#define W83793_REG_VENDORID 0x0d
#define W83793_REG_CHIPID 0x0e
#define W83793_REG_DEVICEID 0x0f
#define W83793_REG_CONFIG 0x40
#define W83793_REG_MFC 0x58
#define W83793_REG_FANIN_CTRL 0x5c
#define W83793_REG_FANIN_SEL 0x5d
#define W83793_REG_I2C_ADDR 0x0b
#define W83793_REG_I2C_SUBADDR 0x0c
#define W83793_REG_VID_INA 0x05
#define W83793_REG_VID_INB 0x06
#define W83793_REG_VID_LATCHA 0x07
#define W83793_REG_VID_LATCHB 0x08
#define W83793_REG_VID_CTRL 0x59
#define W83793_REG_WDT_LOCK 0x01
#define W83793_REG_WDT_ENABLE 0x02
#define W83793_REG_WDT_STATUS 0x03
#define W83793_REG_WDT_TIMEOUT 0x04
static u16 W83793_REG_TEMP_MODE[2] = { 0x5e, 0x5f };
#define TEMP_READ 0
#define TEMP_CRIT 1
#define TEMP_CRIT_HYST 2
#define TEMP_WARN 3
#define TEMP_WARN_HYST 4
/* only crit and crit_hyst affect real-time alarm status
current crit crit_hyst warn warn_hyst */
static u16 W83793_REG_TEMP[][5] = {
{0x1c, 0x78, 0x79, 0x7a, 0x7b},
{0x1d, 0x7c, 0x7d, 0x7e, 0x7f},
{0x1e, 0x80, 0x81, 0x82, 0x83},
{0x1f, 0x84, 0x85, 0x86, 0x87},
{0x20, 0x88, 0x89, 0x8a, 0x8b},
{0x21, 0x8c, 0x8d, 0x8e, 0x8f},
};
#define W83793_REG_TEMP_LOW_BITS 0x22
#define W83793_REG_BEEP(index) (0x53 + (index))
#define W83793_REG_ALARM(index) (0x4b + (index))
#define W83793_REG_CLR_CHASSIS 0x4a /* SMI MASK4 */
#define W83793_REG_IRQ_CTRL 0x50
#define W83793_REG_OVT_CTRL 0x51
#define W83793_REG_OVT_BEEP 0x52
#define IN_READ 0
#define IN_MAX 1
#define IN_LOW 2
static const u16 W83793_REG_IN[][3] = {
/* Current, High, Low */
{0x10, 0x60, 0x61}, /* Vcore A */
{0x11, 0x62, 0x63}, /* Vcore B */
{0x12, 0x64, 0x65}, /* Vtt */
{0x14, 0x6a, 0x6b}, /* VSEN1 */
{0x15, 0x6c, 0x6d}, /* VSEN2 */
{0x16, 0x6e, 0x6f}, /* +3VSEN */
{0x17, 0x70, 0x71}, /* +12VSEN */
{0x18, 0x72, 0x73}, /* 5VDD */
{0x19, 0x74, 0x75}, /* 5VSB */
{0x1a, 0x76, 0x77}, /* VBAT */
};
/* Low Bits of Vcore A/B Vtt Read/High/Low */
static const u16 W83793_REG_IN_LOW_BITS[] = { 0x1b, 0x68, 0x69 };
static u8 scale_in[] = { 2, 2, 2, 16, 16, 16, 8, 24, 24, 16 };
static u8 scale_in_add[] = { 0, 0, 0, 0, 0, 0, 0, 150, 150, 0 };
#define W83793_REG_FAN(index) (0x23 + 2 * (index)) /* High byte */
#define W83793_REG_FAN_MIN(index) (0x90 + 2 * (index)) /* High byte */
#define W83793_REG_PWM_DEFAULT 0xb2
#define W83793_REG_PWM_ENABLE 0x207
#define W83793_REG_PWM_UPTIME 0xc3 /* Unit in 0.1 second */
#define W83793_REG_PWM_DOWNTIME 0xc4 /* Unit in 0.1 second */
#define W83793_REG_TEMP_CRITICAL 0xc5
#define PWM_DUTY 0
#define PWM_START 1
#define PWM_NONSTOP 2
#define PWM_STOP_TIME 3
#define W83793_REG_PWM(index, nr) (((nr) == 0 ? 0xb3 : \
(nr) == 1 ? 0x220 : 0x218) + (index))
/* bit field, fan1 is bit0, fan2 is bit1 ... */
#define W83793_REG_TEMP_FAN_MAP(index) (0x201 + (index))
#define W83793_REG_TEMP_TOL(index) (0x208 + (index))
#define W83793_REG_TEMP_CRUISE(index) (0x210 + (index))
#define W83793_REG_PWM_STOP_TIME(index) (0x228 + (index))
#define W83793_REG_SF2_TEMP(index, nr) (0x230 + ((index) << 4) + (nr))
#define W83793_REG_SF2_PWM(index, nr) (0x238 + ((index) << 4) + (nr))
/*
 * Convert a 12-bit fan count register value to RPM.
 * 0 or a saturated count (>= 0xfff) reads back as 0 RPM.
 */
static inline unsigned long FAN_FROM_REG(u16 val)
{
	if (val == 0 || val >= 0xfff)
		return 0;
	return 1350000UL / val;
}
/*
 * Convert an RPM limit into the 12-bit register encoding.
 * Non-positive RPM disables the limit (0xfff); otherwise the count is
 * rounded to nearest and clamped into the valid 1..0xffe range.
 */
static inline u16 FAN_TO_REG(long rpm)
{
	if (rpm <= 0)
		return 0x0fff;
	return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
/* Register unit is 0.1 second; sysfs reports milliseconds. */
static inline unsigned long TIME_FROM_REG(u8 reg)
{
	return 100UL * reg;
}
/* Milliseconds to the 0.1 s register unit, rounded to nearest, clamped. */
static inline u8 TIME_TO_REG(unsigned long val)
{
	unsigned long tenths = (val + 50) / 100;

	return SENSORS_LIMIT(tenths, 0, 0xff);
}
/* Register holds whole degrees C; sysfs wants millidegrees. */
static inline long TEMP_FROM_REG(s8 reg)
{
	return 1000L * reg;
}
/* Millidegrees to whole degrees, rounded away from zero, then clamped. */
static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
{
	long degrees = (val + (val < 0 ? -500 : 500)) / 1000;

	return SENSORS_LIMIT(degrees, min, max);
}
/*
 * Per-client driver state.  The register cache below is refreshed by
 * w83793_update_device()/w83793_update_nonvolatile(); update_lock
 * serializes every read-modify-write cycle against the chip.
 */
struct w83793_data {
	struct i2c_client *lm75[2];
	struct device *hwmon_dev;
	struct mutex update_lock;	/* protects cache and chip access */
	char valid;			/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */
	unsigned long last_nonvolatile;	/* In jiffies, last time we update the
					   nonvolatile registers */
	u8 bank;
	u8 vrm;
	u8 vid[2];
	u8 in[10][3];		/* Register value, read/high/low */
	u8 in_low_bits[3];	/* Additional resolution for VCore A/B Vtt */
	u16 has_fan;		/* Only fan1- fan5 has own pins */
	u16 fan[12];		/* Register value combine */
	u16 fan_min[12];	/* Register value combine */
	s8 temp[6][5];		/* current, crit, crit_hyst,warn, warn_hyst */
	u8 temp_low_bits;	/* Additional resolution TD1-TD4 */
	u8 temp_mode[2];	/* byte 0: Temp D1-D4 mode each has 2 bits
				   byte 1: Temp R1,R2 mode, each has 1 bit */
	u8 temp_critical;	/* If reached all fan will be at full speed */
	u8 temp_fan_map[6];	/* Temp controls which pwm fan, bit field */
	u8 has_pwm;
	u8 has_temp;
	u8 has_vid;
	u8 pwm_enable;		/* Register value, each Temp has 1 bit */
	u8 pwm_uptime;		/* Register value */
	u8 pwm_downtime;	/* Register value */
	u8 pwm_default;		/* All fan default pwm, next poweron valid */
	u8 pwm[8][3];		/* Register value */
	u8 pwm_stop_time[8];
	u8 temp_cruise[6];
	u8 alarms[5];		/* realtime status registers */
	u8 beeps[5];
	u8 beep_enable;
	u8 tolerance[3];	/* Temp tolerance(Smart Fan I/II) */
	u8 sf2_pwm[6][7];	/* Smart FanII: Fan duty cycle */
	u8 sf2_temp[6][7];	/* Smart FanII: Temp level point */

	/* watchdog */
	struct i2c_client *client;	/* may be NULL; watchdog ops check it */
	struct mutex watchdog_lock;
	struct list_head list;	/* member of the watchdog_data_list */
	struct kref kref;	/* lifetime shared by i2c and misc device */
	struct miscdevice watchdog_miscdev;
	unsigned long watchdog_is_open;
	char watchdog_expect_close;
	char watchdog_name[10];	/* must be unique to avoid sysfs conflict */
	unsigned int watchdog_caused_reboot;
	int watchdog_timeout;	/* watchdog timeout in minutes */
};
/* Somewhat ugly :( global data pointer list with all devices, so that
we can find our device data as when using misc_register. There is no
other method to get to one's device data from the open file-op and
for usage in the reboot notifier callback. */
static LIST_HEAD(watchdog_data_list);
/* Note this lock not only protect list access, but also data.kref access */
static DEFINE_MUTEX(watchdog_data_mutex);
/* Release our data struct when we're detached from the i2c client *and* all
references to our watchdog device are released */
/* kref release callback: free the w83793_data the kref is embedded in. */
static void w83793_release_resources(struct kref *ref)
{
	/* recover the containing structure from the embedded kref */
	struct w83793_data *data = container_of(ref, struct w83793_data, kref);
	kfree(data);
}
static u8 w83793_read_value(struct i2c_client *client, u16 reg);
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83793_remove(struct i2c_client *client);
static void w83793_init_client(struct i2c_client *client);
static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
static const struct i2c_device_id w83793_id[] = {
{ "w83793", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83793_id);
static struct i2c_driver w83793_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "w83793",
},
.probe = w83793_probe,
.remove = w83793_remove,
.id_table = w83793_id,
.detect = w83793_detect,
.address_list = normal_i2c,
};
/* Show the VRM version used for VID decoding. */
static ssize_t
show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83793_data *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", priv->vrm);
}
/* Show the decoded CPU core voltage for VID input `index` in mV. */
static ssize_t
show_vid(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);

	return sprintf(buf, "%d\n",
		       vid_from_reg(data->vid[sattr->index], data->vrm));
}
/*
 * Set the VRM version used to decode the VID pins.
 *
 * Rejects malformed input with -EINVAL instead of silently storing 0
 * (simple_strtoul has no error reporting); this matches the validation
 * already done by store_chassis_clear in this file.
 */
static ssize_t
store_vrm(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct w83793_data *data = dev_get_drvdata(dev);
	unsigned long val;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;

	data->vrm = val;
	return count;
}
#define ALARM_STATUS 0
#define BEEP_ENABLE 1
/*
 * Show one alarm status bit (nr == ALARM_STATUS) or one beep enable bit
 * (nr == BEEP_ENABLE).  index encodes register (high bits) and bit (low 3).
 */
static ssize_t
show_alarm_beep(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	int reg = sattr->index >> 3;
	int bit = sattr->index & 0x07;
	u8 status;

	if (sattr->nr == ALARM_STATUS)
		status = data->alarms[reg];
	else	/* BEEP_ENABLE */
		status = data->beeps[reg];

	return sprintf(buf, "%u\n", (status >> bit) & 1);
}
/* Set or clear a single beep enable bit; only "0" and "1" are accepted. */
static ssize_t
store_beep(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int reg = sattr->index >> 3;
	int shift = sattr->index & 0x07;
	u8 val = simple_strtoul(buf, NULL, 10);
	u8 tmp;

	if (val > 1)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	/* read-modify-write the bit inside the beep mask register */
	tmp = w83793_read_value(client, W83793_REG_BEEP(reg));
	tmp &= ~(1 << shift);
	tmp |= val << shift;
	data->beeps[reg] = tmp;
	w83793_write_value(client, W83793_REG_BEEP(reg), tmp);
	mutex_unlock(&data->update_lock);

	return count;
}
static ssize_t
show_beep_enable(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
return sprintf(buf, "%u\n", (data->beep_enable >> 1) & 0x01);
}
/* Set the global beep enable flag; only "0" and "1" are accepted. */
static ssize_t
store_beep_enable(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	u8 val = simple_strtoul(buf, NULL, 10);
	u8 reg;

	if (val > 1)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	/* clear bit 1 of OVT_BEEP, then set it from the requested value */
	reg = w83793_read_value(client, W83793_REG_OVT_BEEP) & 0xfd;
	reg |= val << 1;
	data->beep_enable = reg;
	w83793_write_value(client, W83793_REG_OVT_BEEP, reg);
	mutex_unlock(&data->update_lock);

	return count;
}
/* Write any value to clear chassis alarm */
/* Deprecated: any write clears the chassis intrusion alarm. */
static ssize_t
store_chassis_clear_legacy(struct device *dev,
			   struct device_attribute *attr, const char *buf,
			   size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	u8 reg;

	dev_warn(dev, "Attribute chassis is deprecated, "
		 "use intrusion0_alarm instead\n");

	mutex_lock(&data->update_lock);
	/* setting bit 7 of the SMI MASK4 register clears the alarm */
	reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS) | 0x80;
	w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg);
	mutex_unlock(&data->update_lock);

	return count;
}
/* Write 0 to clear chassis alarm */
/* Writing "0" clears the chassis intrusion alarm; anything else is EINVAL. */
static ssize_t
store_chassis_clear(struct device *dev,
		    struct device_attribute *attr, const char *buf,
		    size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	unsigned long val;
	u8 tmp;

	if (strict_strtoul(buf, 10, &val) || val != 0)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	/* setting bit 7 of the SMI MASK4 register clears the alarm */
	tmp = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
	w83793_write_value(client, W83793_REG_CLR_CHASSIS, tmp | 0x80);
	data->valid = 0;	/* Force cache refresh */
	mutex_unlock(&data->update_lock);

	return count;
}
#define FAN_INPUT 0
#define FAN_MIN 1
/* Show a fan reading (nr == FAN_INPUT) or fan minimum limit, in RPM. */
static ssize_t
show_fan(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	u16 reg;

	reg = (sattr->nr == FAN_INPUT) ? data->fan[sattr->index]
				       : data->fan_min[sattr->index];
	/* only the low 12 bits hold the fan count */
	return sprintf(buf, "%lu\n", FAN_FROM_REG(reg & 0x0fff));
}
/* Set a fan minimum limit (RPM); stored as a 12-bit count, high byte first. */
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	int index = sattr->index;
	u16 reg = FAN_TO_REG(simple_strtoul(buf, NULL, 10));

	mutex_lock(&data->update_lock);
	data->fan_min[index] = reg;
	w83793_write_value(client, W83793_REG_FAN_MIN(index),
			   (reg >> 8) & 0xff);
	w83793_write_value(client, W83793_REG_FAN_MIN(index) + 1, reg & 0xff);
	mutex_unlock(&data->update_lock);

	return count;
}
/*
 * Show a PWM value: stop time in ms for nr == PWM_STOP_TIME, otherwise the
 * 6-bit duty register scaled up to the 0-255 sysfs range.
 */
static ssize_t
show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	int nr = sattr->nr;
	int index = sattr->index;
	u16 out;

	if (nr == PWM_STOP_TIME)
		out = TIME_FROM_REG(data->pwm_stop_time[index]);
	else
		out = (data->pwm[index][nr] & 0x3f) << 2;

	return sprintf(buf, "%d\n", out);
}
/*
 * Store a PWM value: stop time in ms for nr == PWM_STOP_TIME, otherwise a
 * 0-255 duty scaled down into the chip's 6-bit field (high 2 bits kept).
 */
static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int nr = sattr->nr;
	int index = sattr->index;

	mutex_lock(&data->update_lock);
	if (nr == PWM_STOP_TIME) {
		u8 reg = TIME_TO_REG(simple_strtoul(buf, NULL, 10));

		data->pwm_stop_time[index] = reg;
		w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
				   reg);
	} else {
		u8 duty = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10),
					0, 0xff) >> 2;
		u8 reg = w83793_read_value(client,
					   W83793_REG_PWM(index, nr)) & 0xc0;

		reg |= duty;
		data->pwm[index][nr] = reg;
		w83793_write_value(client, W83793_REG_PWM(index, nr), reg);
	}
	mutex_unlock(&data->update_lock);

	return count;
}
/* Show a temperature (reading or limit, per nr) in millidegrees C. */
static ssize_t
show_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	int nr = sattr->nr;
	int index = sattr->index;
	long temp = TEMP_FROM_REG(data->temp[index][nr]);

	/* only TD1-TD4 readings carry extra 0.25 degC low bits */
	if (nr == TEMP_READ && index < 4) {
		int frac = ((data->temp_low_bits >> (index * 2)) & 0x03) * 250;

		temp += (temp > 0) ? frac : -frac;
	}
	return sprintf(buf, "%ld\n", temp);
}
/* Store a temperature limit (millidegrees C), clamped to -128..127 degC. */
static ssize_t
store_temp(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	int nr = sattr->nr;
	int index = sattr->index;
	s8 reg = TEMP_TO_REG(simple_strtol(buf, NULL, 10), -128, 127);

	mutex_lock(&data->update_lock);
	data->temp[index][nr] = reg;
	w83793_write_value(client, W83793_REG_TEMP[index][nr], reg);
	mutex_unlock(&data->update_lock);

	return count;
}
/*
TD1-TD4
each has 4 mode:(2 bits)
0: Stop monitor
1: Use internal temp sensor(default)
2: Reserved
3: Use sensor in Intel CPU and get result by PECI
TR1-TR2
each has 2 mode:(1 bit)
0: Disable temp sensor monitor
1: To enable temp sensors monitor
*/
/* 0 disable, 6 PECI */
static u8 TO_TEMP_MODE[] = { 0, 0, 0, 6 };
/*
 * Show the sensor type of a temp channel: 3 (diode) or 4 (thermistor) for
 * an enabled internal sensor, otherwise the mapping in TO_TEMP_MODE[]
 * (0 = disabled, 6 = PECI).
 */
static ssize_t
show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83793_data *data = w83793_update_device(dev);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int index = sattr->index;
	int td = index < 4;	/* TD1-TD4 vs TR1-TR2 */
	u8 mask = td ? 0x03 : 0x01;
	u8 shift = td ? 2 * index : index - 4;
	u8 mode;

	mode = (data->temp_mode[td ? 0 : 1] >> shift) & mask;
	/* for the internal sensor, found out if diode or thermistor */
	if (mode == 1)
		mode = td ? 3 : 4;
	else
		mode = TO_TEMP_MODE[mode];

	return sprintf(buf, "%d\n", mode);
}
/*
 * Select the sensor type for a temp channel.  Accepted sysfs values:
 * TD1-TD4 (index 0-3): 3 = diode, 6 = PECI; TR1-TR2 (index 4-5): 4 =
 * thermistor.  Everything else is rejected with -EINVAL.
 */
static ssize_t
store_temp_mode(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	struct sensor_device_attribute_2 *sensor_attr =
	    to_sensor_dev_attr_2(attr);
	int index = sensor_attr->index;
	/* TD channels use a 2-bit field, TR channels a 1-bit field */
	u8 mask = (index < 4) ? 0x03 : 0x01;
	u8 shift = (index < 4) ? (2 * index) : (index - 4);
	u8 val = simple_strtoul(buf, NULL, 10);

	/* transform the sysfs interface values into table above */
	if ((val == 6) && (index < 4)) {
		/* sysfs 6 (PECI) corresponds to register value 3 */
		val -= 3;
	} else if ((val == 3 && index < 4)
		|| (val == 4 && index >= 4)) {
		/* transform diode or thermistor into internal enable */
		val = !!val;
	} else {
		return -EINVAL;
	}
	/* mode byte 0 holds TD1-TD4, byte 1 holds TR1-TR2 */
	index = (index < 4) ? 0 : 1;
	mutex_lock(&data->update_lock);
	data->temp_mode[index] =
	    w83793_read_value(client, W83793_REG_TEMP_MODE[index]);
	data->temp_mode[index] &= ~(mask << shift);
	data->temp_mode[index] |= val << shift;
	w83793_write_value(client, W83793_REG_TEMP_MODE[index],
			   data->temp_mode[index]);
	mutex_unlock(&data->update_lock);
	return count;
}
#define SETUP_PWM_DEFAULT 0
#define SETUP_PWM_UPTIME 1 /* Unit in 0.1s */
#define SETUP_PWM_DOWNTIME 2 /* Unit in 0.1s */
#define SETUP_TEMP_CRITICAL 3
/*
 * Show one global Smart Fan setup parameter, selected by nr: default PWM
 * duty, PWM ramp up/down time (ms), or critical temperature (mdegC).
 */
static ssize_t
show_sf_setup(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	u32 val;

	switch (sattr->nr) {
	case SETUP_PWM_DEFAULT:
		val = (data->pwm_default & 0x3f) << 2;
		break;
	case SETUP_PWM_UPTIME:
		val = TIME_FROM_REG(data->pwm_uptime);
		break;
	case SETUP_PWM_DOWNTIME:
		val = TIME_FROM_REG(data->pwm_downtime);
		break;
	case SETUP_TEMP_CRITICAL:
		val = TEMP_FROM_REG(data->temp_critical & 0x7f);
		break;
	default:
		val = 0;
		break;
	}
	return sprintf(buf, "%d\n", val);
}
/*
 * Store one global Smart Fan setup parameter, selected by nr: default
 * PWM duty, PWM ramp up/down step time, or the critical temperature
 * (per the field comment, all fans go full speed once it is reached).
 */
static ssize_t
store_sf_setup(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sensor_attr =
	    to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);

	mutex_lock(&data->update_lock);
	if (SETUP_PWM_DEFAULT == nr) {
		/* preserve the high two register bits, store 6-bit duty */
		data->pwm_default =
		    w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
		data->pwm_default |= SENSORS_LIMIT(simple_strtoul(buf, NULL,
								  10),
						   0, 0xff) >> 2;
		w83793_write_value(client, W83793_REG_PWM_DEFAULT,
				   data->pwm_default);
	} else if (SETUP_PWM_UPTIME == nr) {
		data->pwm_uptime = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
		/* a ramp time of 0 is bumped to the minimum of 1 */
		data->pwm_uptime += data->pwm_uptime == 0 ? 1 : 0;
		w83793_write_value(client, W83793_REG_PWM_UPTIME,
				   data->pwm_uptime);
	} else if (SETUP_PWM_DOWNTIME == nr) {
		data->pwm_downtime = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
		data->pwm_downtime += data->pwm_downtime == 0 ? 1 : 0;
		w83793_write_value(client, W83793_REG_PWM_DOWNTIME,
				   data->pwm_downtime);
	} else {		/* SETUP_TEMP_CRITICAL */
		/* preserve register bit 7, store 7-bit temperature */
		data->temp_critical =
		    w83793_read_value(client, W83793_REG_TEMP_CRITICAL) & 0x80;
		data->temp_critical |= TEMP_TO_REG(simple_strtol(buf, NULL, 10),
						   0, 0x7f);
		w83793_write_value(client, W83793_REG_TEMP_CRITICAL,
				   data->temp_critical);
	}
	mutex_unlock(&data->update_lock);
	return count;
}
/*
Temp SmartFan control
TEMP_FAN_MAP
Temp channel control which pwm fan, bitfield, bit 0 indicate pwm1...
It's possible two or more temp channels control the same fan, w83793
always prefers to pick the most critical request and applies it to
the related Fan.
It's possible one fan is not in any mapping of 6 temp channels, this
means the fan is manual mode
TEMP_PWM_ENABLE
Each temp channel has its own SmartFan mode, and temp channel
control fans that are set by TEMP_FAN_MAP
0: SmartFanII mode
1: Thermal Cruise Mode
TEMP_CRUISE
Target temperature in thermal cruise mode, w83793 will try to turn
fan speed to keep the temperature of target device around this
temperature.
TEMP_TOLERANCE
If Temp higher or lower than target with this tolerance, w83793
will take actions to speed up or slow down the fan to keep the
temperature within the tolerance range.
*/
#define TEMP_FAN_MAP 0
#define TEMP_PWM_ENABLE 1
#define TEMP_CRUISE 2
#define TEMP_TOLERANCE 3
/*
 * Show one Smart Fan control parameter for temp channel `index`:
 * fan map bitmask, pwm enable mode (register bit 0/1 shown as 2/3),
 * thermal-cruise target (mdegC) or tolerance (mdegC).
 */
static ssize_t
show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	int index = sattr->index;
	u32 val;

	switch (sattr->nr) {
	case TEMP_FAN_MAP:
		val = data->temp_fan_map[index];
		break;
	case TEMP_PWM_ENABLE:
		/* +2 to transfrom into 2 and 3 to conform with sysfs intf */
		val = ((data->pwm_enable >> index) & 0x01) + 2;
		break;
	case TEMP_CRUISE:
		val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
		break;
	default:		/* TEMP_TOLERANCE */
		val = data->tolerance[index >> 1] >> ((index & 0x01) ? 4 : 0);
		val = TEMP_FROM_REG(val & 0x0f);
		break;
	}
	return sprintf(buf, "%d\n", val);
}
/*
 * Store one Smart Fan control parameter for temp channel `index`,
 * selected by nr: fan map bitmask, pwm enable mode (sysfs 2/3 mapped to
 * register bit 0/1), thermal-cruise target temp, or temp tolerance.
 */
static ssize_t
store_sf_ctrl(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sensor_attr =
	    to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	u32 val;

	mutex_lock(&data->update_lock);
	if (TEMP_FAN_MAP == nr) {
		val = simple_strtoul(buf, NULL, 10) & 0xff;
		w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
		data->temp_fan_map[index] = val;
	} else if (TEMP_PWM_ENABLE == nr) {
		val = simple_strtoul(buf, NULL, 10);
		if (2 == val || 3 == val) {
			data->pwm_enable =
			    w83793_read_value(client, W83793_REG_PWM_ENABLE);
			if (val - 2)	/* 3 = Thermal Cruise: set the bit */
				data->pwm_enable |= 1 << index;
			else		/* 2 = SmartFanII: clear the bit */
				data->pwm_enable &= ~(1 << index);
			w83793_write_value(client, W83793_REG_PWM_ENABLE,
					   data->pwm_enable);
		} else {
			mutex_unlock(&data->update_lock);
			return -EINVAL;
		}
	} else if (TEMP_CRUISE == nr) {
		/* preserve register bit 7, replace the 7-bit target temp */
		data->temp_cruise[index] =
		    w83793_read_value(client, W83793_REG_TEMP_CRUISE(index));
		val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x7f);
		data->temp_cruise[index] &= 0x80;
		data->temp_cruise[index] |= val;
		w83793_write_value(client, W83793_REG_TEMP_CRUISE(index),
				   data->temp_cruise[index]);
	} else {		/* TEMP_TOLERANCE */
		/* two channels share one register: low/high nibble */
		int i = index >> 1;
		u8 shift = (index & 0x01) ? 4 : 0;
		data->tolerance[i] =
		    w83793_read_value(client, W83793_REG_TEMP_TOL(i));
		val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x0f);
		data->tolerance[i] &= ~(0x0f << shift);
		data->tolerance[i] |= val << shift;
		w83793_write_value(client, W83793_REG_TEMP_TOL(i),
				   data->tolerance[i]);
	}
	mutex_unlock(&data->update_lock);
	return count;
}
/* Show a SmartFanII duty point, scaled from the 6-bit field to 0-255. */
static ssize_t
show_sf2_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	int duty = (data->sf2_pwm[sattr->index][sattr->nr] & 0x3f) << 2;

	return sprintf(buf, "%d\n", duty);
}
/* Store a SmartFanII duty point: 0-255 scaled into the chip's 6-bit field. */
static ssize_t
store_sf2_pwm(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int nr = sattr->nr;
	int index = sattr->index;
	u8 duty = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 0xff) >> 2;
	u8 reg;

	mutex_lock(&data->update_lock);
	/* keep the high two register bits, replace the 6-bit duty */
	reg = w83793_read_value(client, W83793_REG_SF2_PWM(index, nr)) & 0xc0;
	reg |= duty;
	data->sf2_pwm[index][nr] = reg;
	w83793_write_value(client, W83793_REG_SF2_PWM(index, nr), reg);
	mutex_unlock(&data->update_lock);

	return count;
}
/* Show a SmartFanII temperature level point in millidegrees C. */
static ssize_t
show_sf2_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	long temp = TEMP_FROM_REG(data->sf2_temp[sattr->index][sattr->nr]
				  & 0x7f);

	return sprintf(buf, "%ld\n", temp);
}
/* Store a SmartFanII temperature level point, clamped to 0..127 degC. */
static ssize_t
store_sf2_temp(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	int nr = sattr->nr;
	int index = sattr->index;
	u8 temp = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x7f);
	u8 reg;

	mutex_lock(&data->update_lock);
	/* keep register bit 7, replace the 7-bit temperature */
	reg = w83793_read_value(client, W83793_REG_SF2_TEMP(index, nr)) & 0x80;
	reg |= temp;
	data->sf2_temp[index][nr] = reg;
	w83793_write_value(client, W83793_REG_SF2_TEMP(index, nr), reg);
	mutex_unlock(&data->update_lock);

	return count;
}
/* only Vcore A/B and Vtt have additional 2 bits precision */
/*
 * Show a voltage (reading or limit, per nr) in mV.  Inputs 0-2 (Vcore A/B,
 * Vtt) have 2 extra low-order bits in a shared register.
 */
static ssize_t
show_in(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
	struct w83793_data *data = w83793_update_device(dev);
	int nr = sattr->nr;
	int index = sattr->index;
	u16 val = data->in[index][nr];

	if (index < 3)
		val = (val << 2) | ((data->in_low_bits[nr] >> (index * 2))
				   & 0x3);
	/* voltage inputs 5VDD and 5VSB needs 150mV offset */
	val = val * scale_in[index] + scale_in_add[index];
	return sprintf(buf, "%d\n", val);
}
/*
 * Store a voltage limit (nr = IN_MAX or IN_LOW) for input `index` in mV.
 * Inputs 0-2 are 10-bit: the 2 low-order bits go into the shared
 * IN_LOW_BITS register, the upper 8 bits into the main limit register.
 */
static ssize_t
store_in(struct device *dev, struct device_attribute *attr,
	 const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sensor_attr =
	    to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	u32 val;

	/* mV -> register scale, rounding to nearest step */
	val =
	    (simple_strtoul(buf, NULL, 10) +
	     scale_in[index] / 2) / scale_in[index];
	mutex_lock(&data->update_lock);
	if (index > 2) {
		/* fix the limit values of 5VDD and 5VSB to ALARM mechanism */
		if (1 == nr || 2 == nr) {
			/* remove the 150 mV offset before storing */
			val -= scale_in_add[index] / scale_in[index];
		}
		val = SENSORS_LIMIT(val, 0, 255);
	} else {
		val = SENSORS_LIMIT(val, 0, 0x3FF);
		/* write the 2 extra low-order bits into the shared register */
		data->in_low_bits[nr] =
		    w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
		data->in_low_bits[nr] &= ~(0x03 << (2 * index));
		data->in_low_bits[nr] |= (val & 0x03) << (2 * index);
		w83793_write_value(client, W83793_REG_IN_LOW_BITS[nr],
				   data->in_low_bits[nr]);
		val >>= 2;
	}
	data->in[index][nr] = val;
	w83793_write_value(client, W83793_REG_IN[index][nr],
			   data->in[index][nr]);
	mutex_unlock(&data->update_lock);
	return count;
}
#define NOT_USED -1
#define SENSOR_ATTR_IN(index) \
SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \
IN_READ, index), \
SENSOR_ATTR_2(in##index##_max, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_MAX, index), \
SENSOR_ATTR_2(in##index##_min, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_LOW, index), \
SENSOR_ATTR_2(in##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + ((index > 2) ? 1 : 0)), \
SENSOR_ATTR_2(in##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, \
index + ((index > 2) ? 1 : 0))
#define SENSOR_ATTR_FAN(index) \
SENSOR_ATTR_2(fan##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + 17), \
SENSOR_ATTR_2(fan##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 17), \
SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \
NULL, FAN_INPUT, index - 1), \
SENSOR_ATTR_2(fan##index##_min, S_IWUSR | S_IRUGO, \
show_fan, store_fan_min, FAN_MIN, index - 1)
#define SENSOR_ATTR_PWM(index) \
SENSOR_ATTR_2(pwm##index, S_IWUSR | S_IRUGO, show_pwm, \
store_pwm, PWM_DUTY, index - 1), \
SENSOR_ATTR_2(pwm##index##_nonstop, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_NONSTOP, index - 1), \
SENSOR_ATTR_2(pwm##index##_start, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_START, index - 1), \
SENSOR_ATTR_2(pwm##index##_stop_time, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_STOP_TIME, index - 1)
#define SENSOR_ATTR_TEMP(index) \
SENSOR_ATTR_2(temp##index##_type, S_IRUGO | S_IWUSR, \
show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
NULL, TEMP_READ, index - 1), \
SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_CRIT, index - 1), \
SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_CRIT_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_warn, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_WARN, index - 1), \
SENSOR_ATTR_2(temp##index##_warn_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_WARN_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
show_alarm_beep, NULL, ALARM_STATUS, index + 11), \
SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 11), \
SENSOR_ATTR_2(temp##index##_auto_channels_pwm, \
S_IRUGO | S_IWUSR, show_sf_ctrl, store_sf_ctrl, \
TEMP_FAN_MAP, index - 1), \
SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \
show_sf_ctrl, store_sf_ctrl, TEMP_PWM_ENABLE, \
index - 1), \
SENSOR_ATTR_2(thermal_cruise##index, S_IRUGO | S_IWUSR, \
show_sf_ctrl, store_sf_ctrl, TEMP_CRUISE, index - 1), \
SENSOR_ATTR_2(tolerance##index, S_IRUGO | S_IWUSR, show_sf_ctrl,\
store_sf_ctrl, TEMP_TOLERANCE, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 6, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 6, index - 1)
static struct sensor_device_attribute_2 w83793_sensor_attr_2[] = {
SENSOR_ATTR_IN(0),
SENSOR_ATTR_IN(1),
SENSOR_ATTR_IN(2),
SENSOR_ATTR_IN(3),
SENSOR_ATTR_IN(4),
SENSOR_ATTR_IN(5),
SENSOR_ATTR_IN(6),
SENSOR_ATTR_IN(7),
SENSOR_ATTR_IN(8),
SENSOR_ATTR_IN(9),
SENSOR_ATTR_FAN(1),
SENSOR_ATTR_FAN(2),
SENSOR_ATTR_FAN(3),
SENSOR_ATTR_FAN(4),
SENSOR_ATTR_FAN(5),
SENSOR_ATTR_PWM(1),
SENSOR_ATTR_PWM(2),
SENSOR_ATTR_PWM(3),
};
static struct sensor_device_attribute_2 w83793_temp[] = {
SENSOR_ATTR_TEMP(1),
SENSOR_ATTR_TEMP(2),
SENSOR_ATTR_TEMP(3),
SENSOR_ATTR_TEMP(4),
SENSOR_ATTR_TEMP(5),
SENSOR_ATTR_TEMP(6),
};
/* Fan6-Fan12: only exist on multiplexed pins, created per data->has_fan bit */
static struct sensor_device_attribute_2 w83793_left_fan[] = {
	SENSOR_ATTR_FAN(6),
	SENSOR_ATTR_FAN(7),
	SENSOR_ATTR_FAN(8),
	SENSOR_ATTR_FAN(9),
	SENSOR_ATTR_FAN(10),
	SENSOR_ATTR_FAN(11),
	SENSOR_ATTR_FAN(12),
};
/* Pwm4-Pwm8: only exist on multiplexed pins, created per data->has_pwm bit */
static struct sensor_device_attribute_2 w83793_left_pwm[] = {
	SENSOR_ATTR_PWM(4),
	SENSOR_ATTR_PWM(5),
	SENSOR_ATTR_PWM(6),
	SENSOR_ATTR_PWM(7),
	SENSOR_ATTR_PWM(8),
};
/* CPU VID inputs; created per data->has_vid bit (bit0: VIDA, bit1: VIDB) */
static struct sensor_device_attribute_2 w83793_vid[] = {
	SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
	SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
};
static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);

/* Non-indexed sysfs files: chassis intrusion (legacy + standard name),
 * beep enable, and the global smart-fan setup parameters. */
static struct sensor_device_attribute_2 sda_single_files[] = {
	SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
		      store_chassis_clear_legacy, ALARM_STATUS, 30),
	SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
		      store_chassis_clear, ALARM_STATUS, 30),
	SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
		      store_beep_enable, NOT_USED, NOT_USED),
	SENSOR_ATTR_2(pwm_default, S_IWUSR | S_IRUGO, show_sf_setup,
		      store_sf_setup, SETUP_PWM_DEFAULT, NOT_USED),
	SENSOR_ATTR_2(pwm_uptime, S_IWUSR | S_IRUGO, show_sf_setup,
		      store_sf_setup, SETUP_PWM_UPTIME, NOT_USED),
	SENSOR_ATTR_2(pwm_downtime, S_IWUSR | S_IRUGO, show_sf_setup,
		      store_sf_setup, SETUP_PWM_DOWNTIME, NOT_USED),
	SENSOR_ATTR_2(temp_critical, S_IWUSR | S_IRUGO, show_sf_setup,
		      store_sf_setup, SETUP_TEMP_CRITICAL, NOT_USED),
};
static void w83793_init_client(struct i2c_client *client)
{
if (reset) {
w83793_write_value(client, W83793_REG_CONFIG, 0x80);
}
/* Start monitoring */
w83793_write_value(client, W83793_REG_CONFIG,
w83793_read_value(client, W83793_REG_CONFIG) | 0x01);
}
/*
* Watchdog routines
*/
/*
 * watchdog_set_timeout - program a new watchdog timeout.
 * The hardware counts in minutes, so @timeout (seconds) is rounded up.
 * Returns the effective timeout in seconds, -EINVAL for out-of-range
 * values, or -ENODEV once the i2c client is gone.
 */
static int watchdog_set_timeout(struct w83793_data *data, int timeout)
{
	int minutes = DIV_ROUND_UP(timeout, 60);
	int ret = -ENODEV;

	if (minutes > 255)
		return -EINVAL;

	mutex_lock(&data->watchdog_lock);
	if (data->client) {
		data->watchdog_timeout = minutes;
		/* Timeout register is expressed in minutes */
		w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
				   data->watchdog_timeout);
		ret = minutes * 60;
	}
	mutex_unlock(&data->watchdog_lock);

	return ret;
}
/* Return the currently programmed watchdog timeout, converted to seconds. */
static int watchdog_get_timeout(struct w83793_data *data)
{
	int seconds;

	mutex_lock(&data->watchdog_lock);
	seconds = data->watchdog_timeout * 60;
	mutex_unlock(&data->watchdog_lock);

	return seconds;
}
/*
 * watchdog_trigger - keepalive: rewrite the timeout register (in minutes)
 * to restart the hardware countdown.  Returns 0 or -ENODEV if the i2c
 * client has gone away.
 */
static int watchdog_trigger(struct w83793_data *data)
{
	int ret = -ENODEV;

	mutex_lock(&data->watchdog_lock);
	if (data->client) {
		w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
				   data->watchdog_timeout);
		ret = 0;
	}
	mutex_unlock(&data->watchdog_lock);

	return ret;
}
/*
 * watchdog_enable - load the timeout and arm the soft watchdog
 * (0x55 to the lock register starts it).  Returns 0 or -ENODEV.
 */
static int watchdog_enable(struct w83793_data *data)
{
	int ret = -ENODEV;

	mutex_lock(&data->watchdog_lock);
	if (data->client) {
		/* Set initial timeout, then unlock/start the watchdog */
		w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
				   data->watchdog_timeout);
		w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0x55);
		ret = 0;
	}
	mutex_unlock(&data->watchdog_lock);

	return ret;
}
/*
 * watchdog_disable - stop the soft watchdog (0xAA to the lock register).
 * Returns 0 or -ENODEV if the i2c client has gone away.
 */
static int watchdog_disable(struct w83793_data *data)
{
	int ret = -ENODEV;

	mutex_lock(&data->watchdog_lock);
	if (data->client) {
		w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0xAA);
		ret = 0;
	}
	mutex_unlock(&data->watchdog_lock);

	return ret;
}
/*
 * watchdog_open - open() handler for the watchdog chardev.
 * Resolves the minor number to the owning w83793_data, takes a kref on it
 * and arms the hardware watchdog.  Only a single opener is allowed.
 */
static int watchdog_open(struct inode *inode, struct file *filp)
{
	struct w83793_data *pos, *data = NULL;
	int watchdog_is_open;

	/* We get called from drivers/char/misc.c with misc_mtx hold, and we
	   call misc_register() from w83793_probe() with watchdog_data_mutex
	   hold, as misc_register() takes the misc_mtx lock, this is a possible
	   deadlock, so we use mutex_trylock here. */
	if (!mutex_trylock(&watchdog_data_mutex))
		return -ERESTARTSYS;
	/* Find the instance whose registered minor matches this inode */
	list_for_each_entry(pos, &watchdog_data_list, list) {
		if (pos->watchdog_miscdev.minor == iminor(inode)) {
			data = pos;
			break;
		}
	}

	/* Check, if device is already open */
	watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);

	/* Increase data reference counter (if not already done).
	   Note we can never not have found data, so we don't check for this */
	if (!watchdog_is_open)
		kref_get(&data->kref);

	mutex_unlock(&watchdog_data_mutex);

	/* Check, if device is already open and possibly issue error */
	if (watchdog_is_open)
		return -EBUSY;

	/* Enable Soft Watchdog */
	watchdog_enable(data);

	/* Store pointer to data into filp's private data */
	filp->private_data = data;

	return nonseekable_open(inode, filp);
}
/*
 * watchdog_close - release() handler.  Stops the watchdog only when the
 * magic character 'V' was written beforehand; otherwise keeps it running
 * and just pings it, so an unexpected close still reboots the box.
 */
static int watchdog_close(struct inode *inode, struct file *filp)
{
	struct w83793_data *data = filp->private_data;

	if (!data->watchdog_expect_close) {
		/* No magic close: keep the dog armed */
		watchdog_trigger(data);
		dev_crit(&data->client->dev,
			 "unexpected close, not stopping watchdog!\n");
	} else {
		watchdog_disable(data);
		data->watchdog_expect_close = 0;
	}

	clear_bit(0, &data->watchdog_is_open);

	/* Drop the reference taken in watchdog_open() */
	mutex_lock(&watchdog_data_mutex);
	kref_put(&data->kref, w83793_release_resources);
	mutex_unlock(&watchdog_data_mutex);

	return 0;
}
/*
 * watchdog_write - any write pings the watchdog.  Unless nowayout is set,
 * the buffer is scanned for the magic character 'V', which allows the
 * following close() to actually stop the watchdog.
 */
static ssize_t watchdog_write(struct file *filp, const char __user *buf,
	size_t count, loff_t *offset)
{
	struct w83793_data *data = filp->private_data;
	ssize_t ret;

	if (count) {
		if (!nowayout) {
			size_t pos;

			/* Forget any 'V' seen in an earlier write */
			data->watchdog_expect_close = 0;

			for (pos = 0; pos < count; pos++) {
				char c;

				if (get_user(c, buf + pos))
					return -EFAULT;
				if (c == 'V')
					data->watchdog_expect_close = 1;
			}
		}
		ret = watchdog_trigger(data);
		if (ret < 0)
			return ret;
	}
	return count;
}
/*
 * watchdog_ioctl - standard Linux watchdog ioctl interface
 * (Documentation/watchdog/watchdog-api.txt): capability query, status,
 * keepalive, and timeout / enable handling.
 */
static long watchdog_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct watchdog_info ident = {
		.options = WDIOF_KEEPALIVEPING |
			   WDIOF_SETTIMEOUT |
			   WDIOF_CARDRESET,
		.identity = "w83793 watchdog"
	};
	int val, ret = 0;
	struct w83793_data *data = filp->private_data;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		/* Magic close ('V') is only advertised when nowayout is off */
		if (!nowayout)
			ident.options |= WDIOF_MAGICCLOSE;
		if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
			ret = -EFAULT;
		break;
	case WDIOC_GETSTATUS:
		/* Report whether the previous reboot was watchdog-caused */
		val = data->watchdog_caused_reboot ? WDIOF_CARDRESET : 0;
		ret = put_user(val, (int __user *)arg);
		break;
	case WDIOC_GETBOOTSTATUS:
		ret = put_user(0, (int __user *)arg);
		break;
	case WDIOC_KEEPALIVE:
		ret = watchdog_trigger(data);
		break;
	case WDIOC_GETTIMEOUT:
		val = watchdog_get_timeout(data);
		ret = put_user(val, (int __user *)arg);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, (int __user *)arg)) {
			ret = -EFAULT;
			break;
		}
		ret = watchdog_set_timeout(data, val);
		/* On success, return the rounded-up timeout to userspace */
		if (ret > 0)
			ret = put_user(ret, (int __user *)arg);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, (int __user *)arg)) {
			ret = -EFAULT;
			break;
		}
		if (val & WDIOS_DISABLECARD)
			ret = watchdog_disable(data);
		else if (val & WDIOS_ENABLECARD)
			ret = watchdog_enable(data);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -ENOTTY;
	}
	return ret;
}
/* File operations for the watchdog misc chardev registered in probe */
static const struct file_operations watchdog_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = watchdog_open,
	.release = watchdog_close,
	.write = watchdog_write,
	.unlocked_ioctl = watchdog_ioctl,
};
/*
 * Reboot/halt notifier: disable every registered watchdog on an orderly
 * shutdown so the timer cannot fire while the machine goes down.
 */
static int watchdog_notify_sys(struct notifier_block *this, unsigned long code,
			       void *unused)
{
	struct w83793_data *data;

	if (code != SYS_DOWN && code != SYS_HALT)
		return NOTIFY_DONE;

	/* Stop every instance that managed to register a chardev */
	mutex_lock(&watchdog_data_mutex);
	list_for_each_entry(data, &watchdog_data_list, list) {
		if (data->watchdog_miscdev.minor)
			watchdog_disable(data);
	}
	mutex_unlock(&watchdog_data_mutex);

	return NOTIFY_DONE;
}
/*
 * The WDT needs to learn about soft shutdowns in order to
 * turn the timebomb registers off.
 */
/* Registered in w83793_probe(), unregistered in w83793_remove() */
static struct notifier_block watchdog_notifier = {
	.notifier_call = watchdog_notify_sys,
};
/*
* Init / remove routines
*/
/*
 * w83793_remove - i2c remove handler.  Tears down everything probe set up:
 * watchdog chardev, watchdog registers, reboot notifier, hwmon device,
 * sysfs files, lm75 subclients, and finally the probe-time data reference.
 */
static int w83793_remove(struct i2c_client *client)
{
	struct w83793_data *data = i2c_get_clientdata(client);
	struct device *dev = &client->dev;
	int i, tmp;

	/* Unregister the watchdog (if registered) */
	if (data->watchdog_miscdev.minor) {
		misc_deregister(&data->watchdog_miscdev);

		if (data->watchdog_is_open) {
			dev_warn(&client->dev,
				 "i2c client detached with watchdog open! "
				 "Stopping watchdog.\n");
			watchdog_disable(data);
		}

		mutex_lock(&watchdog_data_mutex);
		list_del(&data->list);
		mutex_unlock(&watchdog_data_mutex);

		/* Tell the watchdog code the client is gone */
		mutex_lock(&data->watchdog_lock);
		data->client = NULL;
		mutex_unlock(&data->watchdog_lock);
	}

	/* Reset Configuration Register to Disable Watch Dog Registers */
	tmp = w83793_read_value(client, W83793_REG_CONFIG);
	w83793_write_value(client, W83793_REG_CONFIG, tmp & ~0x04);

	unregister_reboot_notifier(&watchdog_notifier);

	hwmon_device_unregister(data->hwmon_dev);

	/* Remove sysfs files (mirrors the exit_remove path in w83793_probe) */
	for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
		device_remove_file(dev,
				   &w83793_sensor_attr_2[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
		device_remove_file(dev, &sda_single_files[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
		device_remove_file(dev, &w83793_vid[i].dev_attr);
	device_remove_file(dev, &dev_attr_vrm);

	for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
		device_remove_file(dev, &w83793_left_fan[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
		device_remove_file(dev, &w83793_left_pwm[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
		device_remove_file(dev, &w83793_temp[i].dev_attr);

	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
	if (data->lm75[1] != NULL)
		i2c_unregister_device(data->lm75[1]);

	/* Decrease data reference counter */
	mutex_lock(&watchdog_data_mutex);
	kref_put(&data->kref, w83793_release_resources);
	mutex_unlock(&watchdog_data_mutex);

	return 0;
}
/*
 * w83793_detect_subclients - instantiate the (up to two) LM75-compatible
 * subclients at 0x48-0x4f.  Addresses come either from the
 * force_subclients module parameter or from the chip's SUBADDR register.
 * Returns 0 on success, negative errno on failure.
 */
static int
w83793_detect_subclients(struct i2c_client *client)
{
	int i, id, err;
	int address = client->addr;
	u8 tmp;
	struct i2c_adapter *adapter = client->adapter;
	struct w83793_data *data = i2c_get_clientdata(client);

	id = i2c_adapter_id(adapter);
	if (force_subclients[0] == id && force_subclients[1] == address) {
		/* Validate both forced subclient addresses before writing */
		for (i = 2; i <= 3; i++) {
			if (force_subclients[i] < 0x48
			    || force_subclients[i] > 0x4f) {
				dev_err(&client->dev,
					"invalid subclient "
					"address %d; must be 0x48-0x4f\n",
					force_subclients[i]);
				err = -EINVAL;
				goto ERROR_SC_0;
			}
		}
		w83793_write_value(client, W83793_REG_I2C_SUBADDR,
				   (force_subclients[2] & 0x07) |
				   ((force_subclients[3] & 0x07) << 4));
	}

	/* SUBADDR: bits 3/7 disable subclient 0/1, low bits of each
	   nibble select the address offset from 0x48 */
	tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
	if (!(tmp & 0x08)) {
		/* NOTE(review): i2c_new_dummy() can return NULL; the NULL is
		   tolerated by later code but not reported -- confirm this
		   silent-degradation behavior is intended */
		data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7));
	}
	if (!(tmp & 0x80)) {
		/* Both subclients at the same address cannot coexist */
		if ((data->lm75[0] != NULL)
		    && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
			dev_err(&client->dev,
				"duplicate addresses 0x%x, "
				"use force_subclients\n", data->lm75[0]->addr);
			err = -ENODEV;
			goto ERROR_SC_1;
		}
		data->lm75[1] = i2c_new_dummy(adapter,
					      0x48 + ((tmp >> 4) & 0x7));
	}

	return 0;

	/* Undo inits in case of errors */
ERROR_SC_1:
	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
	return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int w83793_detect(struct i2c_client *client,
			 struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	unsigned short address = client->addr;
	u8 bank, vendor, chip_id;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);

	/* Check Winbond vendor ID: high or low byte depending on bank bit 7 */
	vendor = bank & 0x80 ? 0x5c : 0xa3;
	if (vendor != i2c_smbus_read_byte_data(client, W83793_REG_VENDORID)) {
		pr_debug("w83793: Detection failed at check vendor id\n");
		return -ENODEV;
	}

	/* If Winbond chip, address of chip and W83793_REG_I2C_ADDR
	   should match */
	if ((bank & 0x07) == 0
	    && i2c_smbus_read_byte_data(client, W83793_REG_I2C_ADDR) !=
	       (address << 1)) {
		pr_debug("w83793: Detection failed at check i2c addr\n");
		return -ENODEV;
	}

	/* Determine the chip type now */
	chip_id = i2c_smbus_read_byte_data(client, W83793_REG_CHIPID);
	if (chip_id != 0x7b)
		return -ENODEV;

	strlcpy(info->type, "w83793", I2C_NAME_SIZE);

	return 0;
}
/*
 * w83793_probe - i2c probe handler.
 * Allocates per-chip state, finds lm75 subclients, starts monitoring,
 * probes the multiplexed pin configuration (which fans/pwms/temps/VIDs
 * exist), registers sysfs files and the hwmon device, then sets up the
 * watchdog (reboot notifier + misc chardev).
 */
static int w83793_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	/* Candidate minors for the watchdog chardev, tried in order */
	const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
	struct w83793_data *data;
	int i, tmp, val, err;
	/* Number of sysfs files per fan/pwm/temp channel */
	int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
	int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5;
	int files_temp = ARRAY_SIZE(w83793_temp) / 6;

	data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto exit;
	}

	i2c_set_clientdata(client, data);
	data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
	mutex_init(&data->update_lock);
	mutex_init(&data->watchdog_lock);
	INIT_LIST_HEAD(&data->list);
	kref_init(&data->kref);

	/* Store client pointer in our data struct for watchdog usage
	   (where the client is found through a data ptr instead of the
	   otherway around) */
	data->client = client;

	err = w83793_detect_subclients(client);
	if (err)
		goto free_mem;

	/* Initialize the chip */
	w83793_init_client(client);

	/*
	   Only fan 1-5 has their own input pins,
	   Pwm 1-3 has their own pins
	 */
	data->has_fan = 0x1f;
	data->has_pwm = 0x07;
	tmp = w83793_read_value(client, W83793_REG_MFC);
	val = w83793_read_value(client, W83793_REG_FANIN_CTRL);

	/* check the function of pins 49-56 */
	if (tmp & 0x80) {
		data->has_vid |= 0x2;	/* has VIDB */
	} else {
		data->has_pwm |= 0x18;	/* pwm 4,5 */
		if (val & 0x01) {	/* fan 6 */
			data->has_fan |= 0x20;
			data->has_pwm |= 0x20;
		}
		if (val & 0x02) {	/* fan 7 */
			data->has_fan |= 0x40;
			data->has_pwm |= 0x40;
		}
		if (!(tmp & 0x40) && (val & 0x04)) {	/* fan 8 */
			data->has_fan |= 0x80;
			data->has_pwm |= 0x80;
		}
	}

	/* check the function of pins 37-40 */
	if (!(tmp & 0x29))
		data->has_vid |= 0x1;	/* has VIDA */
	if (0x08 == (tmp & 0x0c)) {
		if (val & 0x08)	/* fan 9 */
			data->has_fan |= 0x100;
		if (val & 0x10)	/* fan 10 */
			data->has_fan |= 0x200;
	}
	if (0x20 == (tmp & 0x30)) {
		if (val & 0x20)	/* fan 11 */
			data->has_fan |= 0x400;
		if (val & 0x40)	/* fan 12 */
			data->has_fan |= 0x800;
	}

	if ((tmp & 0x01) && (val & 0x04)) {	/* fan 8, second location */
		data->has_fan |= 0x80;
		data->has_pwm |= 0x80;
	}

	/* Fans 9-12 may also be routed through FANIN_SEL pins */
	tmp = w83793_read_value(client, W83793_REG_FANIN_SEL);
	if ((tmp & 0x01) && (val & 0x08)) {	/* fan 9, second location */
		data->has_fan |= 0x100;
	}
	if ((tmp & 0x02) && (val & 0x10)) {	/* fan 10, second location */
		data->has_fan |= 0x200;
	}
	if ((tmp & 0x04) && (val & 0x20)) {	/* fan 11, second location */
		data->has_fan |= 0x400;
	}
	if ((tmp & 0x08) && (val & 0x40)) {	/* fan 12, second location */
		data->has_fan |= 0x800;
	}

	/* check the temp1-6 mode, ignore former AMDSI selected inputs */
	tmp = w83793_read_value(client,W83793_REG_TEMP_MODE[0]);
	if (tmp & 0x01)
		data->has_temp |= 0x01;
	if (tmp & 0x04)
		data->has_temp |= 0x02;
	if (tmp & 0x10)
		data->has_temp |= 0x04;
	if (tmp & 0x40)
		data->has_temp |= 0x08;

	tmp = w83793_read_value(client,W83793_REG_TEMP_MODE[1]);
	if (tmp & 0x01)
		data->has_temp |= 0x10;
	if (tmp & 0x02)
		data->has_temp |= 0x20;

	/* Register sysfs hooks */
	for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) {
		err = device_create_file(dev,
					 &w83793_sensor_attr_2[i].dev_attr);
		if (err)
			goto exit_remove;
	}

	for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) {
		if (!(data->has_vid & (1 << i)))
			continue;
		err = device_create_file(dev, &w83793_vid[i].dev_attr);
		if (err)
			goto exit_remove;
	}
	if (data->has_vid) {
		data->vrm = vid_which_vrm();
		err = device_create_file(dev, &dev_attr_vrm);
		if (err)
			goto exit_remove;
	}

	for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
		err = device_create_file(dev, &sda_single_files[i].dev_attr);
		if (err)
			goto exit_remove;
	}

	/* Per-channel files only for channels that actually exist */
	for (i = 0; i < 6; i++) {
		int j;
		if (!(data->has_temp & (1 << i)))
			continue;
		for (j = 0; j < files_temp; j++) {
			err = device_create_file(dev,
						 &w83793_temp[(i) * files_temp
							      + j].dev_attr);
			if (err)
				goto exit_remove;
		}
	}

	for (i = 5; i < 12; i++) {
		int j;
		if (!(data->has_fan & (1 << i)))
			continue;
		for (j = 0; j < files_fan; j++) {
			err = device_create_file(dev,
						 &w83793_left_fan[(i - 5) * files_fan
								  + j].dev_attr);
			if (err)
				goto exit_remove;
		}
	}

	for (i = 3; i < 8; i++) {
		int j;
		if (!(data->has_pwm & (1 << i)))
			continue;
		for (j = 0; j < files_pwm; j++) {
			err = device_create_file(dev,
						 &w83793_left_pwm[(i - 3) * files_pwm
								  + j].dev_attr);
			if (err)
				goto exit_remove;
		}
	}

	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove;
	}

	/* Watchdog initialization */

	/* Register boot notifier */
	err = register_reboot_notifier(&watchdog_notifier);
	if (err != 0) {
		dev_err(&client->dev,
			"cannot register reboot notifier (err=%d)\n", err);
		goto exit_devunreg;
	}

	/* Enable Watchdog registers.
	   Set Configuration Register to Enable Watch Dog Registers
	   (Bit 2) = XXXX, X1XX. */
	tmp = w83793_read_value(client, W83793_REG_CONFIG);
	w83793_write_value(client, W83793_REG_CONFIG, tmp | 0x04);

	/* Set the default watchdog timeout */
	data->watchdog_timeout = timeout;

	/* Check, if last reboot was caused by watchdog */
	data->watchdog_caused_reboot =
	    w83793_read_value(data->client, W83793_REG_WDT_STATUS) & 0x01;

	/* Disable Soft Watchdog during initialiation */
	watchdog_disable(data);

	/* We take the data_mutex lock early so that watchdog_open() cannot
	   run when misc_register() has completed, but we've not yet added
	   our data to the watchdog_data_list (and set the default timeout) */
	mutex_lock(&watchdog_data_mutex);
	for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
		/* Register our watchdog part */
		snprintf(data->watchdog_name, sizeof(data->watchdog_name),
			 "watchdog%c", (i == 0) ? '\0' : ('0' + i));
		data->watchdog_miscdev.name = data->watchdog_name;
		data->watchdog_miscdev.fops = &watchdog_fops;
		data->watchdog_miscdev.minor = watchdog_minors[i];

		err = misc_register(&data->watchdog_miscdev);
		if (err == -EBUSY)
			continue;	/* minor taken: try the next one */
		if (err) {
			data->watchdog_miscdev.minor = 0;
			dev_err(&client->dev,
				"Registering watchdog chardev: %d\n", err);
			break;
		}

		list_add(&data->list, &watchdog_data_list);

		dev_info(&client->dev,
			 "Registered watchdog chardev major 10, minor: %d\n",
			 watchdog_minors[i]);
		break;
	}
	if (i == ARRAY_SIZE(watchdog_minors)) {
		data->watchdog_miscdev.minor = 0;
		dev_warn(&client->dev, "Couldn't register watchdog chardev "
			 "(due to no free minor)\n");
	}

	mutex_unlock(&watchdog_data_mutex);

	/* Note: watchdog registration failure is not fatal to the probe */
	return 0;

	/* Unregister hwmon device */

exit_devunreg:

	hwmon_device_unregister(data->hwmon_dev);

	/* Unregister sysfs hooks */

exit_remove:
	for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
		device_remove_file(dev, &w83793_sensor_attr_2[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
		device_remove_file(dev, &sda_single_files[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
		device_remove_file(dev, &w83793_vid[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
		device_remove_file(dev, &w83793_left_fan[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
		device_remove_file(dev, &w83793_left_pwm[i].dev_attr);

	for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
		device_remove_file(dev, &w83793_temp[i].dev_attr);

	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
	if (data->lm75[1] != NULL)
		i2c_unregister_device(data->lm75[1]);
free_mem:
	kfree(data);
exit:
	return err;
}
/*
 * w83793_update_nonvolatile - refresh the slow-changing (limit/setup)
 * registers.  Called from w83793_update_device() with update_lock held;
 * rate-limited to once per 300 seconds.
 */
static void w83793_update_nonvolatile(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	int i, j;
	/*
	   They are somewhat "stable" registers, and to update them every time
	   takes so much time, it's just not worthy. Update them in a long
	   interval to avoid exception.
	 */
	if (!(time_after(jiffies, data->last_nonvolatile + HZ * 300)
	      || !data->valid))
		return;
	/* update voltage limits (index 0 is the live reading, 1/2 limits) */
	for (i = 1; i < 3; i++) {
		for (j = 0; j < ARRAY_SIZE(data->in); j++) {
			data->in[j][i] =
			    w83793_read_value(client, W83793_REG_IN[j][i]);
		}
		data->in_low_bits[i] =
		    w83793_read_value(client, W83793_REG_IN_LOW_BITS[i]);
	}

	for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
		/* Update the Fan measured value and limits */
		if (!(data->has_fan & (1 << i))) {
			continue;
		}
		/* 16 bit fan minimum: high byte then low byte */
		data->fan_min[i] =
		    w83793_read_value(client, W83793_REG_FAN_MIN(i)) << 8;
		data->fan_min[i] |=
		    w83793_read_value(client, W83793_REG_FAN_MIN(i) + 1);
	}

	for (i = 0; i < ARRAY_SIZE(data->temp_fan_map); i++) {
		if (!(data->has_temp & (1 << i)))
			continue;
		data->temp_fan_map[i] =
		    w83793_read_value(client, W83793_REG_TEMP_FAN_MAP(i));
		for (j = 1; j < 5; j++) {
			data->temp[i][j] =
			    w83793_read_value(client, W83793_REG_TEMP[i][j]);
		}
		data->temp_cruise[i] =
		    w83793_read_value(client, W83793_REG_TEMP_CRUISE(i));
		/* 7 smart-fan II setpoints per temp channel */
		for (j = 0; j < 7; j++) {
			data->sf2_pwm[i][j] =
			    w83793_read_value(client, W83793_REG_SF2_PWM(i, j));
			data->sf2_temp[i][j] =
			    w83793_read_value(client,
					      W83793_REG_SF2_TEMP(i, j));
		}
	}

	for (i = 0; i < ARRAY_SIZE(data->temp_mode); i++)
		data->temp_mode[i] =
		    w83793_read_value(client, W83793_REG_TEMP_MODE[i]);

	for (i = 0; i < ARRAY_SIZE(data->tolerance); i++) {
		data->tolerance[i] =
		    w83793_read_value(client, W83793_REG_TEMP_TOL(i));
	}

	for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
		if (!(data->has_pwm & (1 << i)))
			continue;
		data->pwm[i][PWM_NONSTOP] =
		    w83793_read_value(client, W83793_REG_PWM(i, PWM_NONSTOP));
		data->pwm[i][PWM_START] =
		    w83793_read_value(client, W83793_REG_PWM(i, PWM_START));
		data->pwm_stop_time[i] =
		    w83793_read_value(client, W83793_REG_PWM_STOP_TIME(i));
	}

	data->pwm_default = w83793_read_value(client, W83793_REG_PWM_DEFAULT);
	data->pwm_enable = w83793_read_value(client, W83793_REG_PWM_ENABLE);
	data->pwm_uptime = w83793_read_value(client, W83793_REG_PWM_UPTIME);
	data->pwm_downtime = w83793_read_value(client, W83793_REG_PWM_DOWNTIME);
	data->temp_critical =
	    w83793_read_value(client, W83793_REG_TEMP_CRITICAL);
	data->beep_enable = w83793_read_value(client, W83793_REG_OVT_BEEP);

	for (i = 0; i < ARRAY_SIZE(data->beeps); i++) {
		data->beeps[i] = w83793_read_value(client, W83793_REG_BEEP(i));
	}

	data->last_nonvolatile = jiffies;
}
/*
 * w83793_update_device - refresh the cached live readings (voltages,
 * fans, temps, pwm duty, alarms, VID), rate-limited to every 2 seconds.
 * Takes and releases update_lock; returns the per-chip data.
 */
static struct w83793_data *w83793_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83793_data *data = i2c_get_clientdata(client);
	int i;

	mutex_lock(&data->update_lock);

	/* Cache still fresh: skip straight to unlock */
	if (!(time_after(jiffies, data->last_updated + HZ * 2)
	      || !data->valid))
		goto END;

	/* Update the voltages measured value and limits */
	for (i = 0; i < ARRAY_SIZE(data->in); i++)
		data->in[i][IN_READ] =
		    w83793_read_value(client, W83793_REG_IN[i][IN_READ]);

	data->in_low_bits[IN_READ] =
	    w83793_read_value(client, W83793_REG_IN_LOW_BITS[IN_READ]);

	for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
		if (!(data->has_fan & (1 << i))) {
			continue;
		}
		/* 16 bit fan count: high byte then low byte */
		data->fan[i] =
		    w83793_read_value(client, W83793_REG_FAN(i)) << 8;
		data->fan[i] |=
		    w83793_read_value(client, W83793_REG_FAN(i) + 1);
	}

	for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
		if (!(data->has_temp & (1 << i)))
			continue;
		data->temp[i][TEMP_READ] =
		    w83793_read_value(client, W83793_REG_TEMP[i][TEMP_READ]);
	}

	data->temp_low_bits =
	    w83793_read_value(client, W83793_REG_TEMP_LOW_BITS);

	for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
		if (data->has_pwm & (1 << i))
			data->pwm[i][PWM_DUTY] =
			    w83793_read_value(client,
					      W83793_REG_PWM(i, PWM_DUTY));
	}

	for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
		data->alarms[i] =
		    w83793_read_value(client, W83793_REG_ALARM(i));
	if (data->has_vid & 0x01)
		data->vid[0] = w83793_read_value(client, W83793_REG_VID_INA);
	if (data->has_vid & 0x02)
		data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB);
	/* Limits and setup registers are refreshed on their own schedule */
	w83793_update_nonvolatile(dev);
	data->last_updated = jiffies;
	data->valid = 1;

END:
	mutex_unlock(&data->update_lock);
	return data;
}
/*
 * w83793_read_value - read an 8 bit chip register.
 * The high byte of @reg selects the register bank; the bank register is
 * switched first when needed.  If the bank switch fails, 0x0 is returned
 * in place of register contents.
 * Ignore the possibility that somebody changes the bank outside the
 * driver.  Must be called with data->update_lock held, except during
 * initialization.
 */
static u8 w83793_read_value(struct i2c_client *client, u16 reg)
{
	struct w83793_data *data = i2c_get_clientdata(client);
	u8 new_bank = (reg >> 8) | (data->bank & 0xfc);

	if (data->bank != new_bank) {
		if (i2c_smbus_write_byte_data(client, W83793_REG_BANKSEL,
					      new_bank) < 0) {
			dev_err(&client->dev,
				"set bank to %d failed, fall back "
				"to bank %d, read reg 0x%x error\n",
				new_bank, data->bank, reg);
			return 0x0;	/* pretend the chip read 0x0 */
		}
		data->bank = new_bank;
	}

	return i2c_smbus_read_byte_data(client, reg & 0xff);
}
/*
 * w83793_write_value - write an 8 bit chip register.
 * The high byte of @reg selects the register bank; the bank register is
 * switched first when needed.  Returns the smbus result (negative errno
 * on failure, including a failed bank switch).
 * Must be called with data->update_lock held, except during
 * initialization.
 */
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value)
{
	struct w83793_data *data = i2c_get_clientdata(client);
	int res;
	u8 new_bank = reg >> 8;

	new_bank |= data->bank & 0xfc;
	if (data->bank != new_bank) {
		/* Switch banks before touching the register proper.
		   (Assignment pulled out of the if condition - checkpatch.) */
		res = i2c_smbus_write_byte_data(client, W83793_REG_BANKSEL,
						new_bank);
		if (res < 0) {
			dev_err(&client->dev,
				"set bank to %d failed, fall back "
				"to bank %d, write reg 0x%x error\n",
				new_bank, data->bank, reg);
			return res;
		}
		data->bank = new_bank;
	}

	return i2c_smbus_write_byte_data(client, reg & 0xff, value);
}
/* Module init: register the i2c driver (devices found via w83793_detect) */
static int __init sensors_w83793_init(void)
{
	return i2c_add_driver(&w83793_driver);
}

/* Module exit: unregister the i2c driver; remove() runs per device */
static void __exit sensors_w83793_exit(void)
{
	i2c_del_driver(&w83793_driver);
}

MODULE_AUTHOR("Yuan Mu, Sven Anders");
MODULE_DESCRIPTION("w83793 driver");
MODULE_LICENSE("GPL");

module_init(sensors_w83793_init);
module_exit(sensors_w83793_exit);
| gpl-2.0 |
Snuzzo/vigor_aosp_kernel | drivers/net/irda/mcs7780.c | 3083 | 24725 | /*****************************************************************************
*
* Filename: mcs7780.c
* Version: 0.4-alpha
* Description: Irda MosChip USB Dongle Driver
* Authors: Lukasz Stelmach <stlman@poczta.fm>
* Brian Pugh <bpugh@cs.pdx.edu>
* Judy Fischbach <jfisch@cs.pdx.edu>
*
* Based on stir4200 driver, but some things done differently.
* Based on earlier driver by Paul Stewart <stewart@parc.com>
*
* Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
* Copyright (C) 2001, Dag Brattli <dag@brattli.net>
* Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
* Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
* Copyright (C) 2005, Lukasz Stelmach <stlman@poczta.fm>
* Copyright (C) 2005, Brian Pugh <bpugh@cs.pdx.edu>
* Copyright (C) 2005, Judy Fischbach <jfisch@cs.pdx.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/
/*
* MCS7780 is a simple USB to IrDA bridge by MosChip. It is neither
* compatibile with irda-usb nor with stir4200. Although it is quite
* similar to the later as far as general idea of operation is concerned.
* That is it requires the software to do all the framing job at SIR speeds.
* The hardware does take care of the framing at MIR and FIR speeds.
* It supports all speeds from 2400 through 4Mbps
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/crc.h>
#include "mcs7780.h"
#define MCS_VENDOR_ID 0x9710
#define MCS_PRODUCT_ID 0x7780
/* USB id table: the single supported MosChip 7780 dongle */
static struct usb_device_id mcs_table[] = {
	/* MosChip Corp., MCS7780 FIR-USB Adapter */
	{USB_DEVICE(MCS_VENDOR_ID, MCS_PRODUCT_ID)},
	{},
};
MODULE_AUTHOR("Brian Pugh <bpugh@cs.pdx.edu>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for MosChip MCS7780");
MODULE_VERSION("0.3alpha");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(usb, mcs_table);

/* IrLAP minimum-turn-time QoS bits advertised to peers */
static int qos_mtt_bits = 0x07 /* > 1ms */ ;
module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");

/* 1 sets the RXFAST bit on non-Sharp transceivers (see setup below) */
static int receive_mode = 0x1;
module_param(receive_mode, int, 0);
MODULE_PARM_DESC(receive_mode,
		 "Receive mode of the device (1:fast, 0:slow, default:1)");

/* SIR pulse width: 1 selects the short 1.6us pulse (MCS_SIR16US) */
static int sir_tweak = 1;
module_param(sir_tweak, int, 0444);
MODULE_PARM_DESC(sir_tweak,
		 "Default pulse width (1:1.6us, 0:3/16 bit, default:1).");

static int transceiver_type = MCS_TSC_VISHAY;
module_param(transceiver_type, int, 0444);
MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h.");

/* USB driver glue; mcs_probe/mcs_disconnect are defined later in file */
static struct usb_driver mcs_driver = {
	.name = "mcs7780",
	.probe = mcs_probe,
	.disconnect = mcs_disconnect,
	.id_table = mcs_table,
};
/* speed flag selection by direct addressing.
   addr = (speed >> 8) & 0x0f

   0x1 57600    0x2 115200   0x4 1152000  0x5 9600
   0x6 38400    0x9 2400     0xa 576000   0xb 19200

   4Mbps (or 2400) must be checked separately. Since it also has
   to be programmed in a different manner that is not a big problem.
*/
/* Unlisted slots (incl. the implicit [15]) stay 0 = "no flag" */
static __u16 mcs_speed_set[16] = { 0,
	MCS_SPEED_57600,
	MCS_SPEED_115200,
	0,
	MCS_SPEED_1152000,
	MCS_SPEED_9600,
	MCS_SPEED_38400,
	0, 0,
	MCS_SPEED_2400,
	MCS_SPEED_576000,
	MCS_SPEED_19200,
	0, 0, 0,
};
/*
 * mcs_set_reg - write a 16 bit value to a dongle register via a USB
 * control message (no data stage; the value travels in wValue).
 * Returns the usb_control_msg() result.
 */
static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
{
	struct usb_device *dev = mcs->usbdev;
	int ret;

	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
			      MCS_WR_RTYPE, val, reg, NULL, 0,
			      msecs_to_jiffies(MCS_CTRL_TIMEOUT));
	return ret;
}
/*
 * mcs_get_reg - read a 16 bit dongle register via a USB control message.
 * On success usb_control_msg() returns the transfer length (2).
 * NOTE(review): @val is used directly as the transfer buffer; callers
 * appear to pass stack variables - stack buffers are not DMA-safe for
 * USB on all architectures, confirm against usb_control_msg() rules.
 */
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
{
	struct usb_device *dev = mcs->usbdev;

	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
			       MCS_RD_RTYPE, 0, reg, val, 2,
			       msecs_to_jiffies(MCS_CTRL_TIMEOUT));
}
/* Setup a communication between mcs7780 and TFDU chips.  It is described
 * in more detail in the data sheet.  The setup sequence puts the
 * Vishay transceiver into high speed mode.  It will also receive SIR speed
 * packets but at reduced sensitivity.
 */

/* 0: OK 1:ERROR */
static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
{
	int ret = 0;
	__u16 rval;

	/* mcs_get_reg should read exactly two bytes from the dongle */
	ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
	if (unlikely(ret != 2)) {
		ret = -EIO;
		goto error;
	}

	/* The MCS_XCVR_CONF bit puts the transceiver into configuration
	 * mode. The MCS_MODE0 bit must start out high (1) and then
	 * transition to low and the MCS_STFIR and MCS_MODE1 bits must
	 * be low.
	 */
	rval |= (MCS_MODE0 | MCS_XCVR_CONF);
	rval &= ~MCS_STFIR;
	rval &= ~MCS_MODE1;
	ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
	if (unlikely(ret))
		goto error;

	/* Second step of the sequence: drop MODE0 while still configuring */
	rval &= ~MCS_MODE0;
	ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
	if (unlikely(ret))
		goto error;

	/* Finally leave configuration mode */
	rval &= ~MCS_XCVR_CONF;
	ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
	if (unlikely(ret))
		goto error;

	ret = 0;
error:
	return ret;
}
/* Setup a communication between mcs7780 and agilent chip. */
/* Not implemented; returns 1 = error per this file's 0:OK/1:ERROR scheme */
static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
{
	IRDA_WARNING("This transceiver type is not supported yet.\n");
	return 1;
}
/* Setup a communication between mcs7780 and sharp chip. */
/* Not implemented; returns 1 = error per this file's 0:OK/1:ERROR scheme */
static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
{
	IRDA_WARNING("This transceiver type is not supported yet.\n");
	return 1;
}
/* Common setup for all transceivers.
 * Programs the MODE register (configuration mode, 9600 baud SIR defaults),
 * dispatches to the model-specific setup routine, optionally sets RXFAST,
 * and finally clears the RESET bit so all changes take effect.
 * Returns 0 on success; on failure logs the phase recorded in `msg` and
 * returns the failing sub-call's code.
 */
static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
{
	int ret = 0;
	__u16 rval;
	char *msg;	/* describes which setup phase failed, for the log */

	msg = "Basic transceiver setup error.";

	/* read value of MODE Register, set the DRIVER and RESET bits
	 * and write value back out to MODE Register
	 */
	ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	if(unlikely(ret != 2))
		goto error;

	rval |= MCS_DRIVER;	/* put the mcs7780 into configuration mode. */
	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if(unlikely(ret))
		goto error;

	rval = 0;	/* set min pulse width to 0 initially. */
	ret = mcs_set_reg(mcs, MCS_MINRXPW_REG, rval);
	if(unlikely(ret))
		goto error;

	ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	if(unlikely(ret != 2))
		goto error;

	rval &= ~MCS_FIR;	/* turn off fir mode. */
	if(mcs->sir_tweak)
		rval |= MCS_SIR16US;	/* 1.6us pulse width */
	else
		rval &= ~MCS_SIR16US;	/* 3/16 bit time pulse width */

	/* make sure ask mode and back to back packets are off. */
	rval &= ~(MCS_BBTG | MCS_ASK);

	rval &= ~MCS_SPEED_MASK;
	rval |= MCS_SPEED_9600;	/* make sure initial speed is 9600. */
	mcs->speed = 9600;
	mcs->new_speed = 0;	/* new_speed is set to 0 */

	rval &= ~MCS_PLLPWDN;	/* disable power down. */

	/* make sure device determines direction and that the auto send sip
	 * pulse are on.
	 */
	rval |= MCS_DTD | MCS_SIPEN;

	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if(unlikely(ret))
		goto error;

	msg = "transceiver model specific setup error.";
	switch (mcs->transceiver_type) {
	case MCS_TSC_VISHAY:
		ret = mcs_setup_transceiver_vishay(mcs);
		break;

	case MCS_TSC_SHARP:
		ret = mcs_setup_transceiver_sharp(mcs);
		break;

	case MCS_TSC_AGILENT:
		ret = mcs_setup_transceiver_agilent(mcs);
		break;

	default:
		IRDA_WARNING("Unknown transceiver type: %d\n",
			     mcs->transceiver_type);
		ret = 1;
	}
	if (unlikely(ret))
		goto error;

	/* If transceiver is not SHARP, then if receive mode set
	 * on the RXFAST bit in the XCVR Register otherwise unset it
	 */
	if (mcs->transceiver_type != MCS_TSC_SHARP) {
		ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
		if (unlikely(ret != 2))
			goto error;
		if (mcs->receive_mode)
			rval |= MCS_RXFAST;
		else
			rval &= ~MCS_RXFAST;
		ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
		if (unlikely(ret))
			goto error;
	}

	msg = "transceiver reset.";

	ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	if (unlikely(ret != 2))
		goto error;

	/* reset the mcs7780 so all changes take effect. */
	rval &= ~MCS_RESET;
	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if (unlikely(ret))
		goto error;
	else
		return ret;

error:
	IRDA_ERROR("%s\n", msg);
	return ret;
}
/* Wraps the data in format for SIR.
 * Lets the IrDA core async-wrap the payload at buf+2, then prefixes the
 * hardware's little-endian 16 bit total-length field (which includes the
 * two length bytes themselves). Returns the total frame length in bytes.
 */
static inline int mcs_wrap_sir_skb(struct sk_buff *skb, __u8 * buf)
{
	int wraplen;

	/* 2: full frame length, including "the length" */
	wraplen = async_wrap_skb(skb, buf + 2, 4094);
	wraplen += 2;
	/* little-endian length prefix expected by the mcs7780 */
	buf[0] = wraplen & 0xff;
	buf[1] = (wraplen >> 8) & 0xff;

	return wraplen;
}
/* Wraps the data in format for FIR.
 * Layout: [len_lo][len_hi][payload...][fcs 4 bytes, little endian].
 * The 16 bit length prefix counts the whole frame, including itself.
 * Returns the total frame length in bytes.
 */
static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
{
	__u32 checksum = ~(crc32_le(~0, skb->data, skb->len));
	/* 2 bytes of length prefix plus 4 bytes of trailing FCS. */
	unsigned int total = skb->len + 6;
	__u8 *trailer = buf + total - 4;

	/* The mcs7780 requires that the first two bytes are the packet
	 * length in little endian order. Note: the length value includes
	 * the two bytes for the length value itself.
	 */
	buf[0] = total & 0xff;
	buf[1] = (total >> 8) & 0xff;

	/* copy the payload into the tx buffer. */
	skb_copy_from_linear_data(skb, buf + 2, skb->len);

	/* FCS goes in the last four bytes, little endian. */
	trailer[0] = checksum & 0xff;
	trailer[1] = (checksum >> 8) & 0xff;
	trailer[2] = (checksum >> 16) & 0xff;
	trailer[3] = (checksum >> 24) & 0xff;

	return total;
}
/* Wraps the data in format for MIR.
 * Layout: [len_lo][len_hi][payload...][fcs 2 bytes, little endian].
 * The 16 bit length prefix counts the whole frame, including itself.
 * Returns the total frame length in bytes.
 */
static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf)
{
	__u16 checksum = ~(irda_calc_crc16(~0, skb->data, skb->len));
	/* 2 bytes of length prefix plus 2 bytes of trailing FCS. */
	int total = skb->len + 4;

	/* put the total packet length in first. Note: packet length
	 * value includes the two bytes that hold the packet length
	 * itself.
	 */
	buf[0] = total & 0xff;
	buf[1] = (total >> 8) & 0xff;

	/* copy the payload */
	skb_copy_from_linear_data(skb, buf + 2, skb->len);

	/* FCS goes in the last two bytes, little endian. */
	buf[total - 2] = checksum & 0xff;
	buf[total - 1] = (checksum >> 8) & 0xff;

	return total;
}
/* Unwrap received packets at MIR speed. A 16 bit crc_ccitt checksum is
 * used for the fcs. When performed over the entire packet the result
 * should be GOOD_FCS = 0xf0b8. Hands the unwrapped data off to the IrDA
 * layer via a sk_buff.
 */
static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
{
	__u16 fcs;
	int new_len;
	struct sk_buff *skb;

	/* Assume that the frames are going to fill a single packet
	 * rather than span multiple packets.
	 */
	new_len = len - 2;	/* strip the 2 byte FCS trailer */
	if(unlikely(new_len <= 0)) {
		IRDA_ERROR("%s short frame length %d\n",
			   mcs->netdev->name, new_len);
		++mcs->netdev->stats.rx_errors;
		++mcs->netdev->stats.rx_length_errors;
		return;
	}

	/* CRC over payload *and* FCS trailer yields the fixed residue
	 * GOOD_FCS when the frame is intact. */
	fcs = 0;
	fcs = irda_calc_crc16(~fcs, buf, len);

	if(fcs != GOOD_FCS) {
		IRDA_ERROR("crc error calc 0x%x len %d\n",
			   fcs, new_len);
		mcs->netdev->stats.rx_errors++;
		mcs->netdev->stats.rx_crc_errors++;
		return;
	}

	skb = dev_alloc_skb(new_len + 1);
	if(unlikely(!skb)) {
		++mcs->netdev->stats.rx_dropped;
		return;
	}

	/* 1 byte headroom for alignment (matches mcs_net_open's rx skb). */
	skb_reserve(skb, 1);
	skb_copy_to_linear_data(skb, buf, new_len);
	skb_put(skb, new_len);
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	skb->dev = mcs->netdev;

	netif_rx(skb);

	mcs->netdev->stats.rx_packets++;
	mcs->netdev->stats.rx_bytes += new_len;
}
/* Unwrap received packets at FIR speed. A 32 bit crc_ccitt checksum is
 * used for the fcs. Hands the unwrapped data off to the IrDA
 * layer via a sk_buff.
 */
static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
{
	__u32 fcs;
	int new_len;
	struct sk_buff *skb;

	/* Assume that the frames are going to fill a single packet
	 * rather than span multiple packets. This is most likely a false
	 * assumption.
	 */
	new_len = len - 4;	/* strip the 4 byte FCS trailer */
	if(unlikely(new_len <= 0)) {
		IRDA_ERROR("%s short frame length %d\n",
			   mcs->netdev->name, new_len);
		++mcs->netdev->stats.rx_errors;
		++mcs->netdev->stats.rx_length_errors;
		return;
	}

	/* CRC over the payload must match the little-endian trailer. */
	fcs = ~(crc32_le(~0, buf, new_len));
	if(fcs != get_unaligned_le32(buf + new_len)) {
		IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
		mcs->netdev->stats.rx_errors++;
		mcs->netdev->stats.rx_crc_errors++;
		return;
	}

	skb = dev_alloc_skb(new_len + 1);
	if(unlikely(!skb)) {
		++mcs->netdev->stats.rx_dropped;
		return;
	}

	/* 1 byte headroom for alignment (matches mcs_net_open's rx skb). */
	skb_reserve(skb, 1);
	skb_copy_to_linear_data(skb, buf, new_len);
	skb_put(skb, new_len);
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	skb->dev = mcs->netdev;

	netif_rx(skb);

	mcs->netdev->stats.rx_packets++;
	mcs->netdev->stats.rx_bytes += new_len;
}
/* Allocates urbs for both receive and transmit.
 * If alloc fails return error code 0 (fail) otherwise
 * return error code 1 (success).  (Historical boolean convention kept so
 * callers' "if (!mcs_setup_urbs(...))" tests continue to work.)
 */
static inline int mcs_setup_urbs(struct mcs_cb *mcs)
{
	mcs->rx_urb = NULL;

	mcs->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!mcs->tx_urb)
		return 0;

	mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!mcs->rx_urb) {
		/* don't leak the tx urb allocated just above */
		usb_free_urb(mcs->tx_urb);
		mcs->tx_urb = NULL;
		return 0;
	}

	return 1;
}
/* Sets up state to be initially outside frame, gets receive urb,
 * sets status to successful and then submits the urb to start
 * receiving the data.
 * Returns the usb_submit_urb() result (0 on success, negative errno
 * otherwise).
 */
static inline int mcs_receive_start(struct mcs_cb *mcs)
{
	/* IrDA unwrap state machine starts outside any frame. */
	mcs->rx_buff.in_frame = FALSE;
	mcs->rx_buff.state = OUTSIDE_FRAME;

	/* Completion handler mcs_receive_irq() resubmits this urb. */
	usb_fill_bulk_urb(mcs->rx_urb, mcs->usbdev,
			  usb_rcvbulkpipe(mcs->usbdev, mcs->ep_in),
			  mcs->in_buf, 4096, mcs_receive_irq, mcs);

	mcs->rx_urb->status = 0;
	return usb_submit_urb(mcs->rx_urb, GFP_KERNEL);
}
/* Finds the in and out endpoints for the mcs control block.
 * Scans up to `epnum` endpoint descriptors in `ep`, storing the first
 * IN-direction address in mcs->ep_in and the first OUT-direction address
 * in mcs->ep_out (the stored value keeps the USB_DIR_IN bit).
 * Returns 1 once both endpoints have been found, 0 otherwise.
 */
static inline int mcs_find_endpoints(struct mcs_cb *mcs,
				     struct usb_host_endpoint *ep, int epnum)
{
	int i;
	int ret = 0;

	/* If no place to store the endpoints just return */
	if (!ep)
		return ret;

	/* cycle through all endpoints, find the first two that are DIR_IN */
	for (i = 0; i < epnum; i++) {
		if (ep[i].desc.bEndpointAddress & USB_DIR_IN)
			mcs->ep_in = ep[i].desc.bEndpointAddress;
		else
			mcs->ep_out = ep[i].desc.bEndpointAddress;

		/* MosChip says that the chip has only two bulk
		 * endpoints. Find one for each direction and move on.
		 */
		if ((mcs->ep_in != 0) && (mcs->ep_out != 0)) {
			ret = 1;
			break;
		}
	}

	return ret;
}
/* Deferred-work handler: applies a pending speed change from process
 * context, then restarts the transmit queue that mcs_send_irq() stopped.
 */
static void mcs_speed_work(struct work_struct *work)
{
	struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);

	mcs_speed_change(mcs);
	netif_wake_queue(mcs->netdev);
}
/* Function to change the speed of the mcs7780. Fully supports SIR,
 * MIR, and FIR speeds.
 * Waits for any in-flight IR transmit to drain, reprograms the MODE
 * register speed bits (and MINRXPW when crossing a speed class), reruns
 * the transceiver-specific setup if needed, then clears RESET to apply.
 * Returns 0 on success or a negative/driver error code.
 */
static int mcs_speed_change(struct mcs_cb *mcs)
{
	int ret = 0;
	int rst = 0;		/* set when the transceiver must be re-setup */
	int cnt = 0;
	__u16 nspeed;
	__u16 rval;

	/* Table lookup keyed on bits 8..11 of the IrDA speed value. */
	nspeed = mcs_speed_set[(mcs->new_speed >> 8) & 0x0f];

	/* Busy-wait for the IR transmitter to go idle (bounded at 100
	 * polls).  NOTE(review): the mcs_get_reg() return value is ignored
	 * here, so a failed read leaves rval stale — presumably harmless in
	 * practice, but worth confirming. */
	do {
		mcs_get_reg(mcs, MCS_RESV_REG, &rval);
	} while(cnt++ < 100 && (rval & MCS_IRINTX));

	if (cnt > 100) {
		IRDA_ERROR("unable to change speed\n");
		ret = -EIO;
		goto error;
	}

	mcs_get_reg(mcs, MCS_MODE_REG, &rval);

	/* MINRXPW values recommended by MosChip */
	if (mcs->new_speed <= 115200) {
		rval &= ~MCS_FIR;

		/* re-setup needed when dropping out of MIR/FIR */
		if ((rst = (mcs->speed > 115200)))
			mcs_set_reg(mcs, MCS_MINRXPW_REG, 0);

	} else if (mcs->new_speed <= 1152000) {
		rval &= ~MCS_FIR;

		/* re-setup needed unless already at a MIR speed */
		if ((rst = !(mcs->speed == 576000 || mcs->speed == 1152000)))
			mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);

	} else {
		rval |= MCS_FIR;

		/* re-setup needed unless already at FIR */
		if ((rst = (mcs->speed != 4000000)))
			mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);

	}

	rval &= ~MCS_SPEED_MASK;
	rval |= nspeed;

	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
	if (unlikely(ret))
		goto error;

	/* crossing a speed class requires a full transceiver re-setup */
	if (rst)
		switch (mcs->transceiver_type) {
		case MCS_TSC_VISHAY:
			ret = mcs_setup_transceiver_vishay(mcs);
			break;

		case MCS_TSC_SHARP:
			ret = mcs_setup_transceiver_sharp(mcs);
			break;

		case MCS_TSC_AGILENT:
			ret = mcs_setup_transceiver_agilent(mcs);
			break;

		default:
			ret = 1;
			IRDA_WARNING("Unknown transceiver type: %d\n",
				     mcs->transceiver_type);
		}
	if (unlikely(ret))
		goto error;

	/* clearing RESET makes the new configuration take effect */
	mcs_get_reg(mcs, MCS_MODE_REG, &rval);
	rval &= ~MCS_RESET;
	ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);

	mcs->speed = mcs->new_speed;
error:
	mcs->new_speed = 0;
	return ret;
}
/* Ioctl calls not supported at this time. Can be an area of future work. */
static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	/* No commands are implemented; every request is rejected the same
	 * way the original switch's default arm did. */
	return -EOPNOTSUPP;
}
/* Network device is taken down, done by "ifconfig irda0 down".
 * Tears down in reverse of mcs_net_open(): stop the queue, release the
 * receive buffer, kill+free both URBs, then close the IrLAP instance.
 * Always returns 0.
 */
static int mcs_net_close(struct net_device *netdev)
{
	int ret = 0;
	struct mcs_cb *mcs = netdev_priv(netdev);

	/* Stop transmit processing */
	netif_stop_queue(netdev);

	kfree_skb(mcs->rx_buff.skb);

	/* kill and free the receive and transmit URBs */
	usb_kill_urb(mcs->rx_urb);
	usb_free_urb(mcs->rx_urb);
	usb_kill_urb(mcs->tx_urb);
	usb_free_urb(mcs->tx_urb);

	/* Stop and remove instance of IrLAP */
	if (mcs->irlap)
		irlap_close(mcs->irlap);

	mcs->irlap = NULL;
	return ret;
}
/* Network device is taken up, done by "ifconfig irda0 up".
 * Clears stalled bulk pipes, programs the transceiver, allocates the
 * receive buffer, opens an IrLAP instance, allocates URBs and starts
 * reception.  Returns 0 on success or a negative errno, unwinding
 * everything acquired so far on failure.
 */
static int mcs_net_open(struct net_device *netdev)
{
	struct mcs_cb *mcs = netdev_priv(netdev);
	char hwname[16];
	int ret = 0;

	/* The IN endpoint belongs to the receive pipe and the OUT endpoint
	 * to the send pipe; pair each with the matching pipe helper. */
	ret = usb_clear_halt(mcs->usbdev,
			     usb_rcvbulkpipe(mcs->usbdev, mcs->ep_in));
	if (ret)
		goto error1;

	ret = usb_clear_halt(mcs->usbdev,
			     usb_sndbulkpipe(mcs->usbdev, mcs->ep_out));
	if (ret)
		goto error1;

	ret = mcs_setup_transceiver(mcs);
	if (ret)
		goto error1;

	ret = -ENOMEM;

	/* Initialize for SIR/FIR to copy data directly into skb. */
	mcs->receiving = 0;
	mcs->rx_buff.truesize = IRDA_SKB_MAX_MTU;
	mcs->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!mcs->rx_buff.skb)
		goto error1;

	skb_reserve(mcs->rx_buff.skb, 1);
	mcs->rx_buff.head = mcs->rx_buff.skb->data;
	do_gettimeofday(&mcs->rx_time);

	/*
	 * Now that everything should be initialized properly,
	 * Open new IrLAP layer instance to take care of us...
	 * Note : will send immediately a speed change...
	 */
	sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
	mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
	if (!mcs->irlap) {
		IRDA_ERROR("mcs7780: irlap_open failed\n");
		goto error2;
	}

	if (!mcs_setup_urbs(mcs))
		goto error3;

	ret = mcs_receive_start(mcs);
	if (ret)
		goto error4;

	netif_start_queue(netdev);
	return 0;

error4:
	/* release the URBs allocated by mcs_setup_urbs() */
	usb_free_urb(mcs->rx_urb);
	usb_free_urb(mcs->tx_urb);
error3:
	irlap_close(mcs->irlap);
error2:
	kfree_skb(mcs->rx_buff.skb);
error1:
	return ret;
}
/* Receive callback function.
 * Runs in interrupt context.  Demultiplexes the received buffer by the
 * current link speed (SIR byte-unwrap, MIR or FIR frame-unwrap) and then
 * resubmits the urb to keep reception going.
 */
static void mcs_receive_irq(struct urb *urb)
{
	__u8 *bytes;
	struct mcs_cb *mcs = urb->context;
	int i;
	int ret;

	/* interface is going down or the transfer failed: do not resubmit */
	if (!netif_running(mcs->netdev))
		return;

	if (urb->status)
		return;

	if (urb->actual_length > 0) {
		bytes = urb->transfer_buffer;

		/* MCS returns frames without BOF and EOF
		 * I assume it returns whole frames.
		 */
		/* SIR speed: feed synthetic BOF (0xc0), the payload bytes,
		 * and a synthetic EOF (0xc1) through the async unwrapper. */
		if(mcs->speed < 576000) {
			async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
					  &mcs->rx_buff, 0xc0);

			for (i = 0; i < urb->actual_length; i++)
				async_unwrap_char(mcs->netdev,
						  &mcs->netdev->stats,
						  &mcs->rx_buff, bytes[i]);

			async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
					  &mcs->rx_buff, 0xc1);
		}
		/* MIR speed */
		else if(mcs->speed == 576000 || mcs->speed == 1152000) {
			mcs_unwrap_mir(mcs, urb->transfer_buffer,
				       urb->actual_length);
		}
		/* FIR speed */
		else {
			mcs_unwrap_fir(mcs, urb->transfer_buffer,
				       urb->actual_length);
		}
		do_gettimeofday(&mcs->rx_time);
	}

	/* NOTE(review): the resubmit result is stored but never checked; a
	 * failure would silently stop reception — worth confirming intent. */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
}
/* Transmit completion callback.
 * If a speed change is pending it must run from process context, so it is
 * deferred to the workqueue (which wakes the queue itself when done);
 * otherwise transmit is restarted immediately.
 */
static void mcs_send_irq(struct urb *urb)
{
	struct mcs_cb *mcs = urb->context;

	if (unlikely(mcs->new_speed))
		schedule_work(&mcs->work);
	else
		netif_wake_queue(mcs->netdev);
}
/* Transmit entry point (net core .ndo_start_xmit).
 * Wraps the skb into the speed-appropriate on-wire format in out_buf and
 * submits it on the bulk-out pipe.  The queue is stopped here and woken
 * again from mcs_send_irq()/mcs_speed_work().  Always consumes the skb
 * and returns NETDEV_TX_OK.
 */
static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
				 struct net_device *ndev)
{
	unsigned long flags;
	struct mcs_cb *mcs;
	int wraplen;
	int ret = 0;

	netif_stop_queue(ndev);
	mcs = netdev_priv(ndev);

	spin_lock_irqsave(&mcs->lock, flags);

	/* record a pending speed change; applied after this frame drains */
	mcs->new_speed = irda_get_next_speed(skb);
	if (likely(mcs->new_speed == mcs->speed))
		mcs->new_speed = 0;

	/* SIR speed */
	if(mcs->speed < 576000) {
		wraplen = mcs_wrap_sir_skb(skb, mcs->out_buf);
	}
	/* MIR speed */
	else if(mcs->speed == 576000 || mcs->speed == 1152000) {
		wraplen = mcs_wrap_mir_skb(skb, mcs->out_buf);
	}
	/* FIR speed */
	else {
		wraplen = mcs_wrap_fir_skb(skb, mcs->out_buf);
	}
	usb_fill_bulk_urb(mcs->tx_urb, mcs->usbdev,
			  usb_sndbulkpipe(mcs->usbdev, mcs->ep_out),
			  mcs->out_buf, wraplen, mcs_send_irq, mcs);

	if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
		IRDA_ERROR("failed tx_urb: %d\n", ret);
		switch (ret) {
		case -ENODEV:
		case -EPIPE:
			/* device gone or stalled: leave the queue stopped */
			break;
		default:
			mcs->netdev->stats.tx_errors++;
			netif_start_queue(ndev);
		}
	} else {
		mcs->netdev->stats.tx_packets++;
		mcs->netdev->stats.tx_bytes += skb->len;
	}

	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&mcs->lock, flags);
	return NETDEV_TX_OK;
}
/* net_device callbacks wired up in mcs_probe(). */
static const struct net_device_ops mcs_netdev_ops = {
	.ndo_open = mcs_net_open,
	.ndo_stop = mcs_net_close,
	.ndo_start_xmit = mcs_hard_xmit,
	.ndo_do_ioctl = mcs_net_ioctl,
};
/*
* This function is called by the USB subsystem for each new device in the
* system. Need to verify the device and if it is, then start handling it.
*/
static int mcs_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct net_device *ndev = NULL;
struct mcs_cb *mcs;
int ret = -ENOMEM;
ndev = alloc_irdadev(sizeof(*mcs));
if (!ndev)
goto error1;
IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
SET_NETDEV_DEV(ndev, &intf->dev);
ret = usb_reset_configuration(udev);
if (ret != 0) {
IRDA_ERROR("mcs7780: usb reset configuration failed\n");
goto error2;
}
mcs = netdev_priv(ndev);
mcs->usbdev = udev;
mcs->netdev = ndev;
spin_lock_init(&mcs->lock);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&mcs->qos);
/* That's the Rx capability. */
mcs->qos.baud_rate.bits &=
IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200
| IR_576000 | IR_1152000 | (IR_4000000 << 8);
mcs->qos.min_turn_time.bits &= qos_mtt_bits;
irda_qos_bits_to_value(&mcs->qos);
/* Speed change work initialisation*/
INIT_WORK(&mcs->work, mcs_speed_work);
ndev->netdev_ops = &mcs_netdev_ops;
if (!intf->cur_altsetting)
goto error2;
ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint,
intf->cur_altsetting->desc.bNumEndpoints);
if (!ret) {
ret = -ENODEV;
goto error2;
}
ret = register_netdev(ndev);
if (ret != 0)
goto error2;
IRDA_DEBUG(1, "IrDA: Registered MosChip MCS7780 device as %s\n",
ndev->name);
mcs->transceiver_type = transceiver_type;
mcs->sir_tweak = sir_tweak;
mcs->receive_mode = receive_mode;
usb_set_intfdata(intf, mcs);
return 0;
error2:
free_netdev(ndev);
error1:
return ret;
}
/* The current device is removed, the USB layer tells us to shut down.
 * Flushes any pending speed-change work before unregistering, since the
 * work item touches the netdev being freed.
 */
static void mcs_disconnect(struct usb_interface *intf)
{
	struct mcs_cb *mcs = usb_get_intfdata(intf);

	if (!mcs)
		return;

	cancel_work_sync(&mcs->work);

	unregister_netdev(mcs->netdev);
	free_netdev(mcs->netdev);

	usb_set_intfdata(intf, NULL);
	IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
}
/* Module insertion: register the driver with the USB subsystem. */
static int __init mcs_init(void)
{
	int ret = usb_register(&mcs_driver);

	if (ret)
		IRDA_ERROR("usb_register failed. Error number %d\n", ret);

	return ret;
}
module_init(mcs_init);
/* Module removal */
static void __exit mcs_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&mcs_driver);
}
module_exit(mcs_exit);
| gpl-2.0 |
championswimmer/kernel_sony_msm8960t | drivers/staging/iio/dac/ad5446.c | 4875 | 10900 | /*
* AD5446 SPI DAC driver
*
* Copyright 2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
#include "dac.h"
#include "ad5446.h"
/* AD5444/AD5446: 16 bit big-endian frame with the load-and-update
 * control bits OR'd into the data word. */
static void ad5446_store_sample(struct ad5446_state *st, unsigned val)
{
	unsigned frame = AD5446_LOAD | val;

	st->data.d16 = cpu_to_be16(frame);
}
/* AD5541/AD5542 family: the raw DAC word is sent big-endian, no control
 * bits. */
static void ad5542_store_sample(struct ad5446_state *st, unsigned val)
{
	st->data.d16 = cpu_to_be16(val);
}
/* AD5620/AD5640: 16 bit big-endian frame with the AD5620 load command
 * OR'd into the data word. */
static void ad5620_store_sample(struct ad5446_state *st, unsigned val)
{
	st->data.d16 = cpu_to_be16(AD5620_LOAD | val);
}
/* AD5660: 24 bit frame, MSB first, with the load command OR'd into the
 * data word. */
static void ad5660_store_sample(struct ad5446_state *st, unsigned val)
{
	unsigned frame = val | AD5660_LOAD;

	st->data.d24[0] = (frame >> 16) & 0xFF;
	st->data.d24[1] = (frame >> 8) & 0xFF;
	st->data.d24[2] = frame & 0xFF;
}
/* AD5620 family: power-down mode bits sit at [15:14] of the 16 bit
 * big-endian frame; the data bits are zero. */
static void ad5620_store_pwr_down(struct ad5446_state *st, unsigned mode)
{
	st->data.d16 = cpu_to_be16(mode << 14);
}
/* AD5660: power-down mode bits sit at [17:16] of the 24 bit frame (the
 * first byte sent); the two data bytes are zero. */
static void ad5660_store_pwr_down(struct ad5446_state *st, unsigned mode)
{
	unsigned frame = mode << 16;

	st->data.d24[0] = (frame >> 16) & 0xFF;
	st->data.d24[1] = (frame >> 8) & 0xFF;
	st->data.d24[2] = frame & 0xFF;
}
/* sysfs store: select the power-down termination mode by name.
 * Accepts "1kohm_to_gnd", "100kohm_to_gnd" or "three_state"; anything
 * else is rejected with -EINVAL before any state is touched.
 */
static ssize_t ad5446_write_powerdown_mode(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5446_state *st = iio_priv(indio_dev);
	unsigned mode;

	if (sysfs_streq(buf, "1kohm_to_gnd"))
		mode = MODE_PWRDWN_1k;
	else if (sysfs_streq(buf, "100kohm_to_gnd"))
		mode = MODE_PWRDWN_100k;
	else if (sysfs_streq(buf, "three_state"))
		mode = MODE_PWRDWN_TRISTATE;
	else
		return -EINVAL;

	st->pwr_down_mode = mode;

	return len;
}
/* sysfs show: report the currently selected power-down mode by name. */
static ssize_t ad5446_read_powerdown_mode(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5446_state *st = iio_priv(indio_dev);
	/* static const: the original rebuilt this table on the stack on
	 * every read.  Indexed by the MODE_PWRDWN_* values; index 0 is a
	 * placeholder for "no mode". */
	static const char * const mode[] = {
		"", "1kohm_to_gnd", "100kohm_to_gnd", "three_state"
	};

	return sprintf(buf, "%s\n", mode[st->pwr_down_mode]);
}
/* sysfs show: report whether the DAC output is currently powered down
 * (1) or active (0). */
static ssize_t ad5446_read_dac_powerdown(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5446_state *st = iio_priv(indio_dev);

	return sprintf(buf, "%d\n", st->pwr_down);
}
/* sysfs store: power the DAC down (1) or back up (0).
 * Powering down writes the configured power-down frame; powering up
 * re-writes the last cached sample.  Returns `len` on success or a
 * negative errno.
 */
static ssize_t ad5446_write_dac_powerdown(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5446_state *st = iio_priv(indio_dev);
	unsigned long readin;
	int ret;

	ret = strict_strtol(buf, 10, &readin);
	if (ret)
		return ret;

	/* Only 0 and 1 are meaningful.  Must return here: the original
	 * code set ret = -EINVAL but fell through, clobbering the error
	 * and pushing an out-of-range value into st->pwr_down. */
	if (readin > 1)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);
	st->pwr_down = readin;

	if (st->pwr_down)
		st->chip_info->store_pwr_down(st, st->pwr_down_mode);
	else
		st->chip_info->store_sample(st, st->cached_val);

	ret = spi_sync(st->spi, &st->msg);
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
/* sysfs attributes: power-down on/off switch, mode selector, and the
 * constant list of selectable modes, grouped for registration with the
 * IIO core. */
static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO | S_IWUSR,
			ad5446_read_powerdown_mode,
			ad5446_write_powerdown_mode, 0);

static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
			"1kohm_to_gnd 100kohm_to_gnd three_state");

static IIO_DEVICE_ATTR(out_voltage0_powerdown, S_IRUGO | S_IWUSR,
			ad5446_read_dac_powerdown,
			ad5446_write_dac_powerdown, 0);

static struct attribute *ad5446_attributes[] = {
	&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
	&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad5446_attribute_group = {
	.attrs = ad5446_attributes,
};
/* Single output-voltage channel descriptor: `bits` of resolution stored
 * in a `storage`-bit word, left-justified by `shift`. */
#define AD5446_CHANNEL(bits, storage, shift) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.output = 1, \
	.channel = 0, \
	.info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
	.scan_type = IIO_ST('u', (bits), (storage), (shift)) \
}
/* Per-variant description: channel geometry, frame-formatting callbacks,
 * and (for parts with an internal reference) the fixed reference voltage.
 * Indexed by the ID_* value carried in spi_device_id.driver_data. */
static const struct ad5446_chip_info ad5446_chip_info_tbl[] = {
	[ID_AD5444] = {
		.channel = AD5446_CHANNEL(12, 16, 2),
		.store_sample = ad5446_store_sample,
	},
	[ID_AD5446] = {
		.channel = AD5446_CHANNEL(14, 16, 0),
		.store_sample = ad5446_store_sample,
	},
	[ID_AD5541A] = {
		.channel = AD5446_CHANNEL(16, 16, 0),
		.store_sample = ad5542_store_sample,
	},
	[ID_AD5542A] = {
		.channel = AD5446_CHANNEL(16, 16, 0),
		.store_sample = ad5542_store_sample,
	},
	[ID_AD5543] = {
		.channel = AD5446_CHANNEL(16, 16, 0),
		.store_sample = ad5542_store_sample,
	},
	[ID_AD5512A] = {
		.channel = AD5446_CHANNEL(12, 16, 4),
		.store_sample = ad5542_store_sample,
	},
	[ID_AD5553] = {
		.channel = AD5446_CHANNEL(14, 16, 0),
		.store_sample = ad5542_store_sample,
	},
	/* parts below support hardware power-down */
	[ID_AD5601] = {
		.channel = AD5446_CHANNEL(8, 16, 6),
		.store_sample = ad5542_store_sample,
		.store_pwr_down = ad5620_store_pwr_down,
	},
	[ID_AD5611] = {
		.channel = AD5446_CHANNEL(10, 16, 4),
		.store_sample = ad5542_store_sample,
		.store_pwr_down = ad5620_store_pwr_down,
	},
	[ID_AD5621] = {
		.channel = AD5446_CHANNEL(12, 16, 2),
		.store_sample = ad5542_store_sample,
		.store_pwr_down = ad5620_store_pwr_down,
	},
	/* parts below have an internal reference (int_vref_mv) */
	[ID_AD5620_2500] = {
		.channel = AD5446_CHANNEL(12, 16, 2),
		.int_vref_mv = 2500,
		.store_sample = ad5620_store_sample,
		.store_pwr_down = ad5620_store_pwr_down,
	},
	[ID_AD5620_1250] = {
		.channel = AD5446_CHANNEL(12, 16, 2),
		.int_vref_mv = 1250,
		.store_sample = ad5620_store_sample,
		.store_pwr_down = ad5620_store_pwr_down,
	},
	[ID_AD5640_2500] = {
		.channel = AD5446_CHANNEL(14, 16, 0),
		.int_vref_mv = 2500,
		.store_sample = ad5620_store_sample,
		.store_pwr_down = ad5620_store_pwr_down,
	},
	[ID_AD5640_1250] = {
		.channel = AD5446_CHANNEL(14, 16, 0),
		.int_vref_mv = 1250,
		.store_sample = ad5620_store_sample,
		.store_pwr_down = ad5620_store_pwr_down,
	},
	[ID_AD5660_2500] = {
		.channel = AD5446_CHANNEL(16, 16, 0),
		.int_vref_mv = 2500,
		.store_sample = ad5660_store_sample,
		.store_pwr_down = ad5660_store_pwr_down,
	},
	[ID_AD5660_1250] = {
		.channel = AD5446_CHANNEL(16, 16, 0),
		.int_vref_mv = 1250,
		.store_sample = ad5660_store_sample,
		.store_pwr_down = ad5660_store_pwr_down,
	},
};
/* IIO read callback.  Only the channel scale is readable: vref divided
 * by 2^resolution, reported as integer volts plus microvolts. */
static int ad5446_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	struct ad5446_state *st = iio_priv(indio_dev);
	unsigned long scale_uv;

	if (m != IIO_CHAN_INFO_SCALE)
		return -EINVAL;

	scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
	*val = scale_uv / 1000;
	*val2 = (scale_uv % 1000) * 1000;
	return IIO_VAL_INT_PLUS_MICRO;
}
/* IIO write callback: set the raw DAC output code (mask 0).
 * Validates the code against the channel resolution, shifts it into the
 * frame position, caches it (so power-up can restore it) and pushes it
 * over SPI.  Returns 0 on success or a negative errno.
 */
static int ad5446_write_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int val,
			    int val2,
			    long mask)
{
	struct ad5446_state *st = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case 0:
		/* code must fit the channel's resolution */
		if (val >= (1 << chan->scan_type.realbits) || val < 0)
			return -EINVAL;

		val <<= chan->scan_type.shift;
		mutex_lock(&indio_dev->mlock);
		st->cached_val = val;	/* remembered for power-up restore */
		st->chip_info->store_sample(st, val);
		ret = spi_sync(st->spi, &st->msg);
		mutex_unlock(&indio_dev->mlock);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/* Two iio_info variants: with and without the power-down sysfs
 * attributes; the probe picks one based on whether the part provides a
 * store_pwr_down callback. */
static const struct iio_info ad5446_info = {
	.read_raw = ad5446_read_raw,
	.write_raw = ad5446_write_raw,
	.attrs = &ad5446_attribute_group,
	.driver_module = THIS_MODULE,
};

static const struct iio_info ad5446_info_no_pwr_down = {
	.read_raw = ad5446_read_raw,
	.write_raw = ad5446_write_raw,
	.driver_module = THIS_MODULE,
};
/* SPI probe: set up regulator (optional), IIO device, default SPI
 * message, and the reference voltage (internal for AD5620/40/60,
 * otherwise taken from the "vcc" supply).  Error paths unwind in
 * reverse acquisition order via the labelled gotos.
 */
static int __devinit ad5446_probe(struct spi_device *spi)
{
	struct ad5446_state *st;
	struct iio_dev *indio_dev;
	struct regulator *reg;
	int ret, voltage_uv = 0;

	/* The supply is optional: an error pointer here is tolerated and
	 * simply leaves voltage_uv at 0. */
	reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(reg)) {
		ret = regulator_enable(reg);
		if (ret)
			goto error_put_reg;

		voltage_uv = regulator_get_voltage(reg);
	}

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_disable_reg;
	}
	st = iio_priv(indio_dev);
	st->chip_info =
		&ad5446_chip_info_tbl[spi_get_device_id(spi)->driver_data];

	spi_set_drvdata(spi, indio_dev);
	st->reg = reg;
	st->spi = spi;

	/* Establish that the iio_dev is a child of the spi device */
	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	/* power-down attrs only for parts that implement the callback */
	if (st->chip_info->store_pwr_down)
		indio_dev->info = &ad5446_info;
	else
		indio_dev->info = &ad5446_info_no_pwr_down;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = &st->chip_info->channel;
	indio_dev->num_channels = 1;

	/* Setup default message */

	st->xfer.tx_buf = &st->data;
	st->xfer.len = st->chip_info->channel.scan_type.storagebits / 8;

	spi_message_init(&st->msg);
	spi_message_add_tail(&st->xfer, &st->msg);

	switch (spi_get_device_id(spi)->driver_data) {
	case ID_AD5620_2500:
	case ID_AD5620_1250:
	case ID_AD5640_2500:
	case ID_AD5640_1250:
	case ID_AD5660_2500:
	case ID_AD5660_1250:
		/* internal-reference parts ignore the supply voltage */
		st->vref_mv = st->chip_info->int_vref_mv;
		break;
	default:
		if (voltage_uv)
			st->vref_mv = voltage_uv / 1000;
		else
			dev_warn(&spi->dev,
				 "reference voltage unspecified\n");
	}

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_device;

	return 0;

error_free_device:
	iio_free_device(indio_dev);
error_disable_reg:
	if (!IS_ERR(reg))
		regulator_disable(reg);
error_put_reg:
	if (!IS_ERR(reg))
		regulator_put(reg);

	return ret;
}
/* SPI remove: unregister the IIO device, release the optional regulator,
 * and free the device (reverse of ad5446_probe). */
static int ad5446_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad5446_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	if (!IS_ERR(st->reg)) {
		regulator_disable(st->reg);
		regulator_put(st->reg);
	}
	iio_free_device(indio_dev);
	return 0;
}
/* SPI device-ID table; driver_data is the index into
 * ad5446_chip_info_tbl. */
static const struct spi_device_id ad5446_id[] = {
	{"ad5444", ID_AD5444},
	{"ad5446", ID_AD5446},
	{"ad5512a", ID_AD5512A},
	{"ad5541a", ID_AD5541A},
	{"ad5542a", ID_AD5542A},
	{"ad5543", ID_AD5543},
	{"ad5553", ID_AD5553},
	{"ad5601", ID_AD5601},
	{"ad5611", ID_AD5611},
	{"ad5621", ID_AD5621},
	{"ad5620-2500", ID_AD5620_2500}, /* AD5620/40/60: */
	{"ad5620-1250", ID_AD5620_1250}, /* part numbers may look differently */
	{"ad5640-2500", ID_AD5640_2500},
	{"ad5640-1250", ID_AD5640_1250},
	{"ad5660-2500", ID_AD5660_2500},
	{"ad5660-1250", ID_AD5660_1250},
	{}
};
MODULE_DEVICE_TABLE(spi, ad5446_id);
/* SPI driver registration and module metadata. */
static struct spi_driver ad5446_driver = {
	.driver = {
		.name	= "ad5446",
		.owner	= THIS_MODULE,
	},
	.probe		= ad5446_probe,
	.remove		= __devexit_p(ad5446_remove),
	.id_table	= ad5446_id,
};
module_spi_driver(ad5446_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
szezso/android_kernel_samsung_expressltexx | drivers/input/touchscreen/tnetv107x-ts.c | 4875 | 9467 | /*
* Texas Instruments TNETV107X Touchscreen Driver
*
* Copyright (C) 2010 Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <mach/tnetv107x.h>
/* Pen-up is detected by a timer: if no new sample arrives within this
 * interval the touch is considered released. */
#define TSC_PENUP_POLL		(HZ / 5)

#define IDLE_TIMEOUT		100 /* msec */

/*
 * The first and last samples of a touch interval are usually garbage and need
 * to be filtered out with these devices.  The following definitions control
 * the number of samples skipped.
 */
#define TSC_HEAD_SKIP		1
#define TSC_TAIL_SKIP		1
#define TSC_SKIP		(TSC_HEAD_SKIP + TSC_TAIL_SKIP + 1)
#define TSC_SAMPLES		(TSC_SKIP + 1)

/* Register Offsets */
struct tsc_regs {
	u32	rev;
	u32	tscm;
	u32	bwcm;
	u32	swc;
	u32	adcchnl;
	u32	adcdata;
	u32	chval[4];
};

/* TSC Mode Configuration Register (tscm) bits */
#define WMODE		BIT(0)
#define TSKIND		BIT(1)
#define ZMEASURE_EN	BIT(2)
#define IDLE		BIT(3)
#define TSC_EN		BIT(4)
#define STOP		BIT(5)
#define ONE_SHOT	BIT(6)
#define SINGLE		BIT(7)
#define AVG		BIT(8)
#define AVGNUM(x)	(((x) & 0x03) << 9)
#define PVSTC(x)	(((x) & 0x07) << 11)
#define PON		BIT(14)
#define PONBG		BIT(15)
#define AFERST		BIT(16)

/* ADC DATA Capture Register bits */
#define DATA_VALID	BIT(16)

/* Register Access Macros */
#define tsc_read(ts, reg)		__raw_readl(&(ts)->regs->reg)
#define tsc_write(ts, reg, val)		__raw_writel(val, &(ts)->regs->reg);
#define tsc_set_bits(ts, reg, val)	\
	tsc_write(ts, reg, tsc_read(ts, reg) | (val))
#define tsc_clr_bits(ts, reg, val)	\
	tsc_write(ts, reg, tsc_read(ts, reg) & ~(val))

/* One touch sample: coordinates plus computed pressure. */
struct sample {
	int x, y, p;
};

/* Per-device driver state. */
struct tsc_data {
	struct input_dev		*input_dev;
	struct resource			*res;
	struct tsc_regs __iomem		*regs;
	struct timer_list		timer;	/* pen-up detection */
	spinlock_t			lock;
	struct clk			*clk;
	struct device			*dev;
	int				sample_count;
	struct sample			samples[TSC_SAMPLES];	/* ring of recent samples */
	int				tsc_irq;
};
/* Read one sample from the four capture registers into *sample.
 * Channel 0 carries the DATA_VALID flag for the whole capture; without
 * it the sample is rejected with -EINVAL.  Pressure is derived from the
 * two Z measurements and X (assumes the usual resistive-panel formula
 * with a plate resistance factor of 600 — TODO confirm against the TRM).
 */
static int tsc_read_sample(struct tsc_data *ts, struct sample* sample)
{
	int	x, y, z1, z2, t, p = 0;
	u32	val;

	val = tsc_read(ts, chval[0]);
	if (val & DATA_VALID)
		x = val & 0xffff;
	else
		return -EINVAL;

	y  = tsc_read(ts, chval[1]) & 0xffff;
	z1 = tsc_read(ts, chval[2]) & 0xffff;
	z2 = tsc_read(ts, chval[3]) & 0xffff;

	if (z1) {
		t = ((600 * x) * (z2 - z1));
		p = t / (u32) (z1 << 12);	/* scale down by 2^12 */
		if (p < 0)
			p = 0;
	}

	sample->x  = x;
	sample->y  = y;
	sample->p  = p;

	return 0;
}
/* Pen-up poll timer.  Fires TSC_PENUP_POLL after the last sample IRQ.
 * If enough samples were seen, reports release; if the touch was shorter
 * than the skip window, averages what we have and reports one tap before
 * the (implicit) release on the next expiry.
 */
static void tsc_poll(unsigned long data)
{
	struct tsc_data *ts = (struct tsc_data *)data;
	unsigned long flags;
	int i, val, x, y, p;

	spin_lock_irqsave(&ts->lock, flags);

	if (ts->sample_count >= TSC_SKIP) {
		/* normal pen-up after a reported touch */
		input_report_abs(ts->input_dev, ABS_PRESSURE, 0);
		input_report_key(ts->input_dev, BTN_TOUCH, 0);
		input_sync(ts->input_dev);
	} else if (ts->sample_count > 0) {
		/*
		 * A touch event lasted less than our skip count.  Salvage and
		 * report anyway.
		 */
		for (i = 0, val = 0; i < ts->sample_count; i++)
			val += ts->samples[i].x;
		x = val / ts->sample_count;

		for (i = 0, val = 0; i < ts->sample_count; i++)
			val += ts->samples[i].y;
		y = val / ts->sample_count;

		for (i = 0, val = 0; i < ts->sample_count; i++)
			val += ts->samples[i].p;
		p = val / ts->sample_count;

		input_report_abs(ts->input_dev, ABS_X, x);
		input_report_abs(ts->input_dev, ABS_Y, y);
		input_report_abs(ts->input_dev, ABS_PRESSURE, p);
		input_report_key(ts->input_dev, BTN_TOUCH, 1);
		input_sync(ts->input_dev);
	}

	ts->sample_count = 0;

	spin_unlock_irqrestore(&ts->lock, flags);
}
/* Sample interrupt.  Stores the new sample in the ring; once the head
 * skip is satisfied, reports the sample that is TSC_TAIL_SKIP entries
 * behind the newest (so trailing garbage never reaches user space) and
 * re-arms the pen-up timer.
 */
static irqreturn_t tsc_irq(int irq, void *dev_id)
{
	struct tsc_data *ts = (struct tsc_data *)dev_id;
	struct sample *sample;
	int index;

	spin_lock(&ts->lock);

	index = ts->sample_count % TSC_SAMPLES;
	sample = &ts->samples[index];
	if (tsc_read_sample(ts, sample) < 0)
		goto out;	/* invalid capture: ignore, don't re-arm */

	if (++ts->sample_count >= TSC_SKIP) {
		/* report the delayed (tail-skipped) sample */
		index = (ts->sample_count - TSC_TAIL_SKIP - 1) % TSC_SAMPLES;
		sample = &ts->samples[index];

		input_report_abs(ts->input_dev, ABS_X, sample->x);
		input_report_abs(ts->input_dev, ABS_Y, sample->y);
		input_report_abs(ts->input_dev, ABS_PRESSURE, sample->p);
		if (ts->sample_count == TSC_SKIP)
			input_report_key(ts->input_dev, BTN_TOUCH, 1);
		input_sync(ts->input_dev);
	}
	mod_timer(&ts->timer, jiffies + TSC_PENUP_POLL);
out:
	spin_unlock(&ts->lock);
	return IRQ_HANDLED;
}
/*
 * Input device open() callback: enable the clock, wait for the
 * controller to reach idle, then program and enable it.
 *
 * Returns 0 on success or -EIO if the controller never went idle
 * within IDLE_TIMEOUT ms.
 *
 * Fix: the old code polled with time_after() and then decided
 * success/failure with a separate time_before() test on jiffies.  That
 * raced: IDLE could be observed just as jiffies passed the deadline
 * (false -EIO), and if the loop exited with jiffies exactly equal to
 * the deadline without seeing IDLE, neither test fired and init
 * proceeded on a non-idle controller.  Track success explicitly and
 * re-check the status once after the deadline instead.
 */
static int tsc_start(struct input_dev *dev)
{
	struct tsc_data *ts = input_get_drvdata(dev);
	unsigned long timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT);
	int idle = 0;
	u32 val;

	clk_enable(ts->clk);

	/* Go to idle mode, before any initialization */
	while (time_after(timeout, jiffies)) {
		if (tsc_read(ts, tscm) & IDLE) {
			idle = 1;
			break;
		}
	}
	/* final check: the device may have gone idle on the last tick */
	if (!idle)
		idle = tsc_read(ts, tscm) & IDLE;
	if (!idle) {
		dev_warn(ts->dev, "timeout waiting for idle\n");
		clk_disable(ts->clk);
		return -EIO;
	}

	/* Configure TSC Control register */
	val = (PONBG | PON | PVSTC(4) | ONE_SHOT | ZMEASURE_EN);
	tsc_write(ts, tscm, val);

	/* Bring TSC out of reset: Clear AFE reset bit */
	val &= ~(AFERST);
	tsc_write(ts, tscm, val);

	/* Configure all pins for hardware control */
	tsc_write(ts, bwcm, 0);

	/* Finally enable the TSC */
	tsc_set_bits(ts, tscm, TSC_EN);

	return 0;
}
/*
 * Input device close() callback.  Order matters: disable the controller
 * first so no new interrupts are raised, wait for any in-flight handler
 * to finish, kill the pen-up timer, then gate the clock.
 */
static void tsc_stop(struct input_dev *dev)
{
	struct tsc_data *ts = input_get_drvdata(dev);

	tsc_clr_bits(ts, tscm, TSC_EN);
	synchronize_irq(ts->tsc_irq);
	del_timer_sync(&ts->timer);
	clk_disable(ts->clk);
}
/*
 * Probe: claim IRQ, register space, clock and input device.  The
 * controller itself is only powered up from the input core's open()
 * callback (tsc_start).  On failure, resources are released in reverse
 * order through the goto ladder below.
 */
static int __devinit tsc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tsc_data *ts;
	int error = 0;
	u32 rev = 0;

	ts = kzalloc(sizeof(struct tsc_data), GFP_KERNEL);
	if (!ts) {
		dev_err(dev, "cannot allocate device info\n");
		return -ENOMEM;
	}

	ts->dev = dev;
	spin_lock_init(&ts->lock);
	setup_timer(&ts->timer, tsc_poll, (unsigned long)ts);
	platform_set_drvdata(pdev, ts);

	ts->tsc_irq = platform_get_irq(pdev, 0);
	if (ts->tsc_irq < 0) {
		dev_err(dev, "cannot determine device interrupt\n");
		error = -ENODEV;
		goto error_res;
	}

	ts->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!ts->res) {
		dev_err(dev, "cannot determine register area\n");
		error = -ENODEV;
		goto error_res;
	}

	if (!request_mem_region(ts->res->start, resource_size(ts->res),
				pdev->name)) {
		dev_err(dev, "cannot claim register memory\n");
		/* NULL res marks "region not claimed" for the error path */
		ts->res = NULL;
		error = -EINVAL;
		goto error_res;
	}

	ts->regs = ioremap(ts->res->start, resource_size(ts->res));
	if (!ts->regs) {
		dev_err(dev, "cannot map register memory\n");
		error = -ENOMEM;
		goto error_map;
	}

	ts->clk = clk_get(dev, NULL);
	if (IS_ERR(ts->clk)) {
		dev_err(dev, "cannot claim device clock\n");
		error = PTR_ERR(ts->clk);
		goto error_clk;
	}

	error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
				     dev_name(dev), ts);
	if (error < 0) {
		dev_err(ts->dev, "Could not allocate ts irq\n");
		goto error_irq;
	}

	ts->input_dev = input_allocate_device();
	if (!ts->input_dev) {
		dev_err(dev, "cannot allocate input device\n");
		error = -ENOMEM;
		goto error_input;
	}
	input_set_drvdata(ts->input_dev, ts);

	ts->input_dev->name       = pdev->name;
	ts->input_dev->id.bustype = BUS_HOST;
	ts->input_dev->dev.parent = &pdev->dev;
	ts->input_dev->open       = tsc_start;
	ts->input_dev->close      = tsc_stop;

	/* read the hardware revision register to fill the input ids */
	clk_enable(ts->clk);
	rev = tsc_read(ts, rev);
	ts->input_dev->id.product = ((rev >>  8) & 0x07);
	ts->input_dev->id.version = ((rev >> 16) & 0xfff);
	clk_disable(ts->clk);

	__set_bit(EV_KEY,    ts->input_dev->evbit);
	__set_bit(EV_ABS,    ts->input_dev->evbit);
	__set_bit(BTN_TOUCH, ts->input_dev->keybit);

	input_set_abs_params(ts->input_dev, ABS_X, 0, 0xffff, 5, 0);
	input_set_abs_params(ts->input_dev, ABS_Y, 0, 0xffff, 5, 0);
	input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 4095, 128, 0);

	error = input_register_device(ts->input_dev);
	if (error < 0) {
		dev_err(dev, "failed input device registration\n");
		goto error_reg;
	}

	return 0;

error_reg:
	input_free_device(ts->input_dev);
error_input:
	free_irq(ts->tsc_irq, ts);
error_irq:
	clk_put(ts->clk);
error_clk:
	iounmap(ts->regs);
error_map:
	release_mem_region(ts->res->start, resource_size(ts->res));
error_res:
	platform_set_drvdata(pdev, NULL);
	kfree(ts);
	return error;
}
/*
 * Remove: undo tsc_probe() in reverse order.  Unregistering the input
 * device invokes tsc_stop() if the device is open, so the controller is
 * quiesced before the IRQ and resources are released.
 */
static int __devexit tsc_remove(struct platform_device *pdev)
{
	struct tsc_data *ts = platform_get_drvdata(pdev);

	input_unregister_device(ts->input_dev);
	free_irq(ts->tsc_irq, ts);
	clk_put(ts->clk);
	iounmap(ts->regs);
	release_mem_region(ts->res->start, resource_size(ts->res));
	platform_set_drvdata(pdev, NULL);
	kfree(ts);

	return 0;
}
/* Platform driver glue; matches devices named "tnetv107x-ts". */
static struct platform_driver tsc_driver = {
	.probe		= tsc_probe,
	.remove		= __devexit_p(tsc_remove),
	.driver.name	= "tnetv107x-ts",
	.driver.owner	= THIS_MODULE,
};
module_platform_driver(tsc_driver);

MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
MODULE_ALIAS("platform:tnetv107x-ts");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Ander-Alvarez/android_kernel_motorola_msm8226 | sound/ppc/pmac.c | 5643 | 38204 | /*
* PMac DBDMA lowlevel functions
*
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
* code based on dmasound.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include "pmac.h"
#include <sound/pcm_params.h>
#include <asm/pmac_feature.h>
#include <asm/pci-bridge.h>
/* fixed frequency table for awacs, screamer, burgundy, DACA (44100 max) */
/* entries are sorted highest-first; index n selects rate n in hardware */
static int awacs_freqs[8] = {
	44100, 29400, 22050, 17640, 14700, 11025, 8820, 7350
};
/* fixed frequency table for tumbler */
static int tumbler_freqs[1] = {
	44100
};

/*
 * we will allocate a single 'emergency' dbdma cmd block to use if the
 * tx status comes up "DEAD".  This happens on some PowerComputing Pmac
 * clones, either owing to a bug in dbdma or some interaction between
 * IDE and sound.  However, this measure would deal with DEAD status if
 * it appeared elsewhere.
 */
static struct pmac_dbdma emergency_dbdma;
static int emergency_in_use;	/* nonzero while the emergency block is live */
/*
* allocate DBDMA command arrays
*/
/*
 * Allocate a coherent DMA area for 'size' dbdma commands (plus one
 * extra for alignment), zero it, and record both the CPU-visible
 * command pointer and the bus address the controller will use.
 * Returns 0 or -ENOMEM.
 */
static int snd_pmac_dbdma_alloc(struct snd_pmac *chip, struct pmac_dbdma *rec, int size)
{
	unsigned int rsize = sizeof(struct dbdma_cmd) * (size + 1);

	rec->space = dma_alloc_coherent(&chip->pdev->dev, rsize,
					&rec->dma_base, GFP_KERNEL);
	if (rec->space == NULL)
		return -ENOMEM;
	rec->size = size;
	memset(rec->space, 0, rsize);
	/* align the command array; 'addr' is the matching bus address */
	rec->cmds = (void __iomem *)DBDMA_ALIGN(rec->space);
	rec->addr = rec->dma_base + (unsigned long)((char *)rec->cmds - (char *)rec->space);

	return 0;
}
/*
 * Release a dbdma command area previously set up by
 * snd_pmac_dbdma_alloc().  Safe to call on a never-allocated record.
 */
static void snd_pmac_dbdma_free(struct snd_pmac *chip, struct pmac_dbdma *rec)
{
	unsigned int rsize;

	if (!rec->space)
		return;

	rsize = sizeof(struct dbdma_cmd) * (rec->size + 1);
	dma_free_coherent(&chip->pdev->dev, rsize, rec->space, rec->dma_base);
}
/*
* pcm stuff
*/
/*
* look up frequency table
*/
/*
 * Map a sample rate to an index into chip->freq_table.
 *
 * The table is sorted highest-first, so rates above the top entry map
 * to index 0.  Otherwise return the first allowed entry (per the
 * stream's cur_freqs bitmask) that is <= the requested rate, falling
 * back to the last allowed entry scanned.
 */
unsigned int snd_pmac_rate_index(struct snd_pmac *chip, struct pmac_stream *rec, unsigned int rate)
{
	int idx;
	int mask = rec->cur_freqs;
	int best = 0;

	if (rate > chip->freq_table[0])
		return 0;

	for (idx = 0; idx < chip->num_freqs; idx++, mask >>= 1) {
		if (mask & 1) {
			best = idx;
			if (rate >= chip->freq_table[idx])
				break;
		}
	}
	return best;
}
/*
* check whether another stream is active
*/
/* Return the opposite stream direction (playback <-> capture). */
static inline int another_stream(int stream)
{
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		return SNDRV_PCM_STREAM_CAPTURE;
	return SNDRV_PCM_STREAM_PLAYBACK;
}
/*
 * allocate buffers
 */
/* hw_params callback: size the preallocated DMA buffer for this setup */
static int snd_pmac_pcm_hw_params(struct snd_pcm_substream *subs,
				  struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}

/*
 * release buffers
 */
/* hw_free callback: give the buffer back to the preallocation pool */
static int snd_pmac_pcm_hw_free(struct snd_pcm_substream *subs)
{
	snd_pcm_lib_free_pages(subs);
	return 0;
}
/*
* get a stream of the opposite direction
*/
/*
 * Resolve a stream direction constant to the chip's stream record.
 * Returns NULL (after snd_BUG()) for an unknown direction.
 */
static struct pmac_stream *snd_pmac_get_stream(struct snd_pmac *chip, int stream)
{
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		return &chip->playback;
	if (stream == SNDRV_PCM_STREAM_CAPTURE)
		return &chip->capture;

	snd_BUG();
	return NULL;
}
/*
* wait while run status is on
*/
/*
 * wait while run status is on
 */
/* busy-wait (up to ~50ms) for the DMA controller to drop its RUN bit */
static inline void
snd_pmac_wait_ack(struct pmac_stream *rec)
{
	int timeout = 50000;
	while ((in_le32(&rec->dma->status) & RUN) && timeout-- > 0)
		udelay(1);
}
/*
* set the format and rate to the chip.
* call the lowlevel function if defined (e.g. for AWACS).
*/
/*
 * set the format and rate to the chip.
 * call the lowlevel function if defined (e.g. for AWACS).
 */
static void snd_pmac_pcm_set_format(struct snd_pmac *chip)
{
	/* set up frequency and format */
	out_le32(&chip->awacs->control, chip->control_mask | (chip->rate_index << 8));
	/* byteswap is enabled only for little-endian S16 streams */
	out_le32(&chip->awacs->byteswap, chip->format == SNDRV_PCM_FORMAT_S16_LE ? 1 : 0);
	if (chip->set_format)
		chip->set_format(chip);
}

/*
 * stop the DMA transfer: clear RUN and flush, then wait for the
 * controller to acknowledge
 */
static inline void snd_pmac_dma_stop(struct pmac_stream *rec)
{
	out_le32(&rec->dma->control, (RUN|WAKE|FLUSH|PAUSE) << 16);
	snd_pmac_wait_ack(rec);
}

/*
 * set the command pointer address (bus address of the dbdma program)
 */
static inline void snd_pmac_dma_set_command(struct pmac_stream *rec, struct pmac_dbdma *cmd)
{
	out_le32(&rec->dma->cmdptr, cmd->addr);
}

/*
 * start the DMA: 'status' bits are written with their mask bits set
 * (high halfword) so only those bits take effect
 */
static inline void snd_pmac_dma_run(struct pmac_stream *rec, int status)
{
	out_le32(&rec->dma->control, status | (status << 16));
}
/*
* prepare playback/capture stream
*/
/*
 * Prepare a playback/capture stream: compute period layout, constrain
 * the opposite stream to the same rate/format (the hardware shares
 * them), then build the circular dbdma program — one transfer command
 * per period, closed by a branch back to the first command.
 */
static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs)
{
	int i;
	volatile struct dbdma_cmd __iomem *cp;
	struct snd_pcm_runtime *runtime = subs->runtime;
	int rate_index;
	long offset;
	struct pmac_stream *astr;

	rec->dma_size = snd_pcm_lib_buffer_bytes(subs);
	rec->period_size = snd_pcm_lib_period_bytes(subs);
	rec->nperiods = rec->dma_size / rec->period_size;
	rec->cur_period = 0;
	rate_index = snd_pmac_rate_index(chip, rec, runtime->rate);

	/* set up constraints */
	astr = snd_pmac_get_stream(chip, another_stream(rec->stream));
	if (! astr)
		return -EINVAL;
	/* lock the other direction to this rate/format while we run */
	astr->cur_freqs = 1 << rate_index;
	astr->cur_formats = 1 << runtime->format;
	chip->rate_index = rate_index;
	chip->format = runtime->format;

	/* We really want to execute a DMA stop command, after the AWACS
	 * is initialized.
	 * For reasons I don't understand, it stops the hissing noise
	 * common to many PowerBook G3 systems and random noise otherwise
	 * captured on iBook2's about every third time. -ReneR
	 */
	spin_lock_irq(&chip->reg_lock);
	snd_pmac_dma_stop(rec);
	st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP);
	snd_pmac_dma_set_command(rec, &chip->extra_dma);
	snd_pmac_dma_run(rec, RUN);
	spin_unlock_irq(&chip->reg_lock);
	mdelay(5);
	spin_lock_irq(&chip->reg_lock);

	/* continuous DMA memory type doesn't provide the physical address,
	 * so we need to resolve the address here...
	 */
	offset = runtime->dma_addr;
	/* one dbdma command per period, all little-endian fields */
	for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) {
		st_le32(&cp->phy_addr, offset);
		st_le16(&cp->req_count, rec->period_size);
		/*st_le16(&cp->res_count, 0);*/
		st_le16(&cp->xfer_status, 0);
		offset += rec->period_size;
	}
	/* make loop: unconditional branch back to the first command */
	st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
	st_le32(&cp->cmd_dep, rec->cmd.addr);

	snd_pmac_dma_stop(rec);
	snd_pmac_dma_set_command(rec, &rec->cmd);
	spin_unlock_irq(&chip->reg_lock);

	return 0;
}
/*
* PCM trigger/stop
*/
/*
 * PCM trigger/stop.  On START the per-period commands are armed with a
 * transfer opcode (OUTPUT_MORE or INPUT_MORE, interrupting after each
 * period) and the controller is started; on STOP every command is
 * rewritten to DBDMA_STOP and the controller is halted.
 */
static int snd_pmac_pcm_trigger(struct snd_pmac *chip, struct pmac_stream *rec,
				struct snd_pcm_substream *subs, int cmd)
{
	volatile struct dbdma_cmd __iomem *cp;
	int i, command;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rec->running)
			return -EBUSY;
		command = (subs->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			   OUTPUT_MORE : INPUT_MORE) + INTR_ALWAYS;
		spin_lock(&chip->reg_lock);
		/* the beep shares the playback DMA channel */
		snd_pmac_beep_stop(chip);
		snd_pmac_pcm_set_format(chip);
		for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)
			out_le16(&cp->command, command);
		snd_pmac_dma_set_command(rec, &rec->cmd);
		(void)in_le32(&rec->dma->status);	/* flush pending status */
		snd_pmac_dma_run(rec, RUN|WAKE);
		rec->running = 1;
		spin_unlock(&chip->reg_lock);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		spin_lock(&chip->reg_lock);
		rec->running = 0;
		/*printk(KERN_DEBUG "stopped!!\n");*/
		snd_pmac_dma_stop(rec);
		for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)
			out_le16(&cp->command, DBDMA_STOP);
		spin_unlock(&chip->reg_lock);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
* return the current pointer
*/
/*
 * Return the current playback/capture position in frames: completed
 * periods plus, when the current command is active, the bytes already
 * transferred within it (req_count - res_count).
 */
inline
static snd_pcm_uframes_t snd_pmac_pcm_pointer(struct snd_pmac *chip,
					      struct pmac_stream *rec,
					      struct snd_pcm_substream *subs)
{
	int count = 0;

#if 1 /* hmm.. how can we get the current dma pointer?? */
	int stat;
	volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period];
	stat = ld_le16(&cp->xfer_status);
	if (stat & (ACTIVE|DEAD)) {
		/* res_count is what's left; convert to bytes done */
		count = in_le16(&cp->res_count);
		if (count)
			count = rec->period_size - count;
	}
#endif
	count += rec->cur_period * rec->period_size;
	/*printk(KERN_DEBUG "pointer=%d\n", count);*/
	return bytes_to_frames(subs->runtime, count);
}
/*
* playback
*/
/*
 * playback: thin wrappers binding the PCM callbacks to chip->playback
 */
static int snd_pmac_playback_prepare(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_prepare(chip, &chip->playback, subs);
}

static int snd_pmac_playback_trigger(struct snd_pcm_substream *subs,
				     int cmd)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_trigger(chip, &chip->playback, subs, cmd);
}

static snd_pcm_uframes_t snd_pmac_playback_pointer(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_pointer(chip, &chip->playback, subs);
}

/*
 * capture: same wrappers for chip->capture
 */
static int snd_pmac_capture_prepare(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_prepare(chip, &chip->capture, subs);
}

static int snd_pmac_capture_trigger(struct snd_pcm_substream *subs,
				    int cmd)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_trigger(chip, &chip->capture, subs, cmd);
}

static snd_pcm_uframes_t snd_pmac_capture_pointer(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);
	return snd_pmac_pcm_pointer(chip, &chip->capture, subs);
}
/*
* Handle DEAD DMA transfers:
* if the TX status comes up "DEAD" - reported on some Power Computing machines
* we need to re-start the dbdma - but from a different physical start address
* and with a different transfer length. It would get very messy to do this
* with the normal dbdma_cmd blocks - we would have to re-write the buffer start
* addresses each time. So, we will keep a single dbdma_cmd block which can be
* fiddled with.
* When DEAD status is first reported the content of the faulted dbdma block is
* copied into the emergency buffer and we note that the buffer is in use.
* we then bump the start physical address by the amount that was successfully
* output before it died.
* On any subsequent DEAD result we just do the bump-ups (we know that we are
* already using the emergency dbdma_cmd).
* CHECK: this just tries to "do it". It is possible that we should abandon
* xfers when the number of residual bytes gets below a certain value - I can
* see that this might cause a loop-forever if a too small transfer causes
* DEAD status. However this is a TODO for now - we'll see what gets reported.
* When we get a successful transfer result with the emergency buffer we just
* pretend that it completed using the original dmdma_cmd and carry on. The
* 'next_cmd' field will already point back to the original loop of blocks.
*/
/*
 * Patch up a DEAD transfer (see the long explanation above): copy the
 * faulted command into the emergency block on first occurrence, then
 * shift its start address/length past the bytes already transferred and
 * restart the controller on the patched command.
 */
static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec,
					  volatile struct dbdma_cmd __iomem *cp)
{
	unsigned short req, res ;
	unsigned int phy ;

	/* printk(KERN_WARNING "snd-powermac: DMA died - patching it up!\n"); */

	/* to clear DEAD status we must first clear RUN
	   set it to quiescent to be on the safe side */
	(void)in_le32(&rec->dma->status);
	out_le32(&rec->dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16);

	if (!emergency_in_use) { /* new problem */
		/* stash the faulted command and reset the original so the
		   loop can resume through it later */
		memcpy((void *)emergency_dbdma.cmds, (void *)cp,
		       sizeof(struct dbdma_cmd));
		emergency_in_use = 1;
		st_le16(&cp->xfer_status, 0);
		st_le16(&cp->req_count, rec->period_size);
		cp = emergency_dbdma.cmds;
	}

	/* now bump the values to reflect the amount
	   we haven't yet shifted */
	req = ld_le16(&cp->req_count);
	res = ld_le16(&cp->res_count);
	phy = ld_le32(&cp->phy_addr);
	phy += (req - res);
	st_le16(&cp->req_count, res);
	st_le16(&cp->res_count, 0);
	st_le16(&cp->xfer_status, 0);
	st_le32(&cp->phy_addr, phy);

	/* branch back into the normal loop at the next period */
	st_le32(&cp->cmd_dep, rec->cmd.addr
		+ sizeof(struct dbdma_cmd)*((rec->cur_period+1)%rec->nperiods));

	st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS);

	/* point at our patched up command block */
	out_le32(&rec->dma->cmdptr, emergency_dbdma.addr);

	/* we must re-start the controller */
	(void)in_le32(&rec->dma->status);
	/* should complete clearing the DEAD status */
	out_le32(&rec->dma->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
}
/*
* update playback/capture pointer from interrupts
*/
/*
 * Advance the stream position from the interrupt path: walk completed
 * periods, handling DEAD transfers via the emergency block, and notify
 * the PCM core after each completed period.  The lock is dropped around
 * snd_pcm_period_elapsed() because it may call back into the driver.
 */
static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec)
{
	volatile struct dbdma_cmd __iomem *cp;
	int c;
	int stat;

	spin_lock(&chip->reg_lock);
	if (rec->running) {
		for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */

			if (emergency_in_use) /* already using DEAD xfer? */
				cp = emergency_dbdma.cmds;
			else
				cp = &rec->cmd.cmds[rec->cur_period];

			stat = ld_le16(&cp->xfer_status);

			if (stat & DEAD) {
				snd_pmac_pcm_dead_xfer(rec, cp);
				break; /* this block is still going */
			}

			if (emergency_in_use)
				emergency_in_use = 0 ; /* done that */

			if (! (stat & ACTIVE))
				break;

			/*printk(KERN_DEBUG "update frag %d\n", rec->cur_period);*/
			/* re-arm the completed command for the next loop pass */
			st_le16(&cp->xfer_status, 0);
			st_le16(&cp->req_count, rec->period_size);
			/*st_le16(&cp->res_count, 0);*/
			rec->cur_period++;
			if (rec->cur_period >= rec->nperiods) {
				rec->cur_period = 0;
			}

			spin_unlock(&chip->reg_lock);
			snd_pcm_period_elapsed(rec->substream);
			spin_lock(&chip->reg_lock);
		}
	}
	spin_unlock(&chip->reg_lock);
}
/*
* hw info
*/
/* hardware capability descriptors; rates are narrowed per-chip at open() */
static struct snd_pcm_hardware snd_pmac_playback =
{
	.info =			(SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_8000_44100,
	.rate_min =		7350,
	.rate_max =		44100,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	131072,
	.period_bytes_min =	256,
	.period_bytes_max =	16384,
	.periods_min =		3,
	.periods_max =		PMAC_MAX_FRAGS,
};

static struct snd_pcm_hardware snd_pmac_capture =
{
	.info =			(SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_8000_44100,
	.rate_min =		7350,
	.rate_max =		44100,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	131072,
	.period_bytes_min =	256,
	.period_bytes_max =	16384,
	.periods_min =		3,
	.periods_max =		PMAC_MAX_FRAGS,
};
#if 0 // NYI
/* disabled draft: hw-rule callbacks that would constrain one stream's
 * rate/format to what the opposite stream allows (see the #if 0 in
 * snd_pmac_pcm_open) */
static int snd_pmac_hw_rule_rate(struct snd_pcm_hw_params *params,
				 struct snd_pcm_hw_rule *rule)
{
	struct snd_pmac *chip = rule->private;
	struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]);
	int i, freq_table[8], num_freqs;

	if (! rec)
		return -EINVAL;
	num_freqs = 0;
	for (i = chip->num_freqs - 1; i >= 0; i--) {
		if (rec->cur_freqs & (1 << i))
			freq_table[num_freqs++] = chip->freq_table[i];
	}

	return snd_interval_list(hw_param_interval(params, rule->var),
				 num_freqs, freq_table, 0);
}

static int snd_pmac_hw_rule_format(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	struct snd_pmac *chip = rule->private;
	struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]);

	if (! rec)
		return -EINVAL;
	return snd_mask_refine_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
				   rec->cur_formats);
}
#endif // NYI
/*
 * Common open(): derive the runtime hardware limits (rate bits,
 * min/max rate, formats, duplex flags) from the chip's capability
 * masks, then attach the stream record to the substream.
 */
static int snd_pmac_pcm_open(struct snd_pmac *chip, struct pmac_stream *rec,
			     struct snd_pcm_substream *subs)
{
	struct snd_pcm_runtime *runtime = subs->runtime;
	int i;

	/* look up frequency table and fill bit mask */
	runtime->hw.rates = 0;
	for (i = 0; i < chip->num_freqs; i++)
		if (chip->freqs_ok & (1 << i))
			runtime->hw.rates |=
				snd_pcm_rate_to_rate_bit(chip->freq_table[i]);

	/* check for minimum and maximum rates */
	/* the table is sorted highest-first, hence max first / min last */
	for (i = 0; i < chip->num_freqs; i++) {
		if (chip->freqs_ok & (1 << i)) {
			runtime->hw.rate_max = chip->freq_table[i];
			break;
		}
	}
	for (i = chip->num_freqs - 1; i >= 0; i--) {
		if (chip->freqs_ok & (1 << i)) {
			runtime->hw.rate_min = chip->freq_table[i];
			break;
		}
	}
	runtime->hw.formats = chip->formats_ok;
	if (chip->can_capture) {
		if (! chip->can_duplex)
			runtime->hw.info |= SNDRV_PCM_INFO_HALF_DUPLEX;
		runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
	}
	runtime->private_data = rec;
	rec->substream = subs;

#if 0 /* FIXME: still under development.. */
	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
			    snd_pmac_hw_rule_rate, chip, rec->stream, -1);
	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
			    snd_pmac_hw_rule_format, chip, rec->stream, -1);
#endif

	/* the dbdma program has room for cmd.size - 1 transfer commands */
	runtime->hw.periods_max = rec->cmd.size - 1;

	/* constraints to fix choppy sound */
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	return 0;
}
/*
 * Common close(): stop the DMA and release the rate/format constraints
 * snd_pmac_pcm_prepare() imposed on the opposite stream.
 */
static int snd_pmac_pcm_close(struct snd_pmac *chip, struct pmac_stream *rec,
			      struct snd_pcm_substream *subs)
{
	struct pmac_stream *astr;

	snd_pmac_dma_stop(rec);

	astr = snd_pmac_get_stream(chip, another_stream(rec->stream));
	if (! astr)
		return -EINVAL;

	/* reset constraints */
	astr->cur_freqs = chip->freqs_ok;
	astr->cur_formats = chip->formats_ok;

	return 0;
}
/* per-direction open/close wrappers around the common helpers above */
static int snd_pmac_playback_open(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);

	subs->runtime->hw = snd_pmac_playback;
	return snd_pmac_pcm_open(chip, &chip->playback, subs);
}

static int snd_pmac_capture_open(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);

	subs->runtime->hw = snd_pmac_capture;
	return snd_pmac_pcm_open(chip, &chip->capture, subs);
}

static int snd_pmac_playback_close(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);

	return snd_pmac_pcm_close(chip, &chip->playback, subs);
}

static int snd_pmac_capture_close(struct snd_pcm_substream *subs)
{
	struct snd_pmac *chip = snd_pcm_substream_chip(subs);

	return snd_pmac_pcm_close(chip, &chip->capture, subs);
}
/*
*/
/* PCM operation tables registered in snd_pmac_pcm_new() */
static struct snd_pcm_ops snd_pmac_playback_ops = {
	.open =		snd_pmac_playback_open,
	.close =	snd_pmac_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_pmac_pcm_hw_params,
	.hw_free =	snd_pmac_pcm_hw_free,
	.prepare =	snd_pmac_playback_prepare,
	.trigger =	snd_pmac_playback_trigger,
	.pointer =	snd_pmac_playback_pointer,
};

static struct snd_pcm_ops snd_pmac_capture_ops = {
	.open =		snd_pmac_capture_open,
	.close =	snd_pmac_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_pmac_pcm_hw_params,
	.hw_free =	snd_pmac_pcm_hw_free,
	.prepare =	snd_pmac_capture_prepare,
	.trigger =	snd_pmac_capture_trigger,
	.pointer =	snd_pmac_capture_pointer,
};
/*
 * Create the PCM device: one playback substream, one capture substream
 * if the chip supports it.  Also initializes the format/rate masks and
 * preallocates the DMA buffers.  Returns 0 or a negative errno.
 */
int __devinit snd_pmac_pcm_new(struct snd_pmac *chip)
{
	struct snd_pcm *pcm;
	int err;
	int num_captures = 1;

	if (! chip->can_capture)
		num_captures = 0;
	err = snd_pcm_new(chip->card, chip->card->driver, 0, 1, num_captures, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_pmac_playback_ops);
	if (chip->can_capture)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_pmac_capture_ops);

	pcm->private_data = chip;
	pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
	strcpy(pcm->name, chip->card->shortname);
	chip->pcm = pcm;

	/* S16_LE only when the hardware can byte-swap */
	chip->formats_ok = SNDRV_PCM_FMTBIT_S16_BE;
	if (chip->can_byte_swap)
		chip->formats_ok |= SNDRV_PCM_FMTBIT_S16_LE;

	chip->playback.cur_formats = chip->formats_ok;
	chip->capture.cur_formats = chip->formats_ok;
	chip->playback.cur_freqs = chip->freqs_ok;
	chip->capture.cur_freqs = chip->freqs_ok;

	/* preallocate 64k buffer */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      &chip->pdev->dev,
					      64 * 1024, 64 * 1024);

	return 0;
}
/* hard-reset both DMA channels (also clears a stuck DEAD status) */
static void snd_pmac_dbdma_reset(struct snd_pmac *chip)
{
	out_le32(&chip->playback.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
	snd_pmac_wait_ack(&chip->playback);
	out_le32(&chip->capture.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
	snd_pmac_wait_ack(&chip->capture);
}
/*
* handling beep
*/
/*
 * Start the beep: build a one-command dbdma loop over the beep buffer
 * (cmd_dep branches back to itself) on the playback channel, and set
 * the requested rate index in the AWACS control register.
 */
void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long addr, int speed)
{
	struct pmac_stream *rec = &chip->playback;

	snd_pmac_dma_stop(rec);
	st_le16(&chip->extra_dma.cmds->req_count, bytes);
	st_le16(&chip->extra_dma.cmds->xfer_status, 0);
	st_le32(&chip->extra_dma.cmds->cmd_dep, chip->extra_dma.addr);
	st_le32(&chip->extra_dma.cmds->phy_addr, addr);
	st_le16(&chip->extra_dma.cmds->command, OUTPUT_MORE + BR_ALWAYS);
	out_le32(&chip->awacs->control,
		 (in_le32(&chip->awacs->control) & ~0x1f00)
		 | (speed << 8));
	out_le32(&chip->awacs->byteswap, 0);
	snd_pmac_dma_set_command(rec, &chip->extra_dma);
	snd_pmac_dma_run(rec, RUN);
}

/* stop the beep and restore the PCM rate/format settings */
void snd_pmac_beep_dma_stop(struct snd_pmac *chip)
{
	snd_pmac_dma_stop(&chip->playback);
	st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP);
	snd_pmac_pcm_set_format(chip);	/* reset format */
}
/*
* interrupt handlers
*/
/* playback DMA interrupt: advance the playback stream position */
static irqreturn_t
snd_pmac_tx_intr(int irq, void *devid)
{
	struct snd_pmac *chip = devid;
	snd_pmac_pcm_update(chip, &chip->playback);
	return IRQ_HANDLED;
}

/* capture DMA interrupt: advance the capture stream position */
static irqreturn_t
snd_pmac_rx_intr(int irq, void *devid)
{
	struct snd_pmac *chip = devid;
	snd_pmac_pcm_update(chip, &chip->capture);
	return IRQ_HANDLED;
}

/* control interrupt: headphone plug change and codec error reporting */
static irqreturn_t
snd_pmac_ctrl_intr(int irq, void *devid)
{
	struct snd_pmac *chip = devid;
	int ctrl = in_le32(&chip->awacs->control);

	/*printk(KERN_DEBUG "pmac: control interrupt.. 0x%x\n", ctrl);*/
	if (ctrl & MASK_PORTCHG) {
		/* do something when headphone is plugged/unplugged? */
		if (chip->update_automute)
			chip->update_automute(chip, 1);
	}
	if (ctrl & MASK_CNTLERR) {
		int err = (in_le32(&chip->awacs->codec_stat) & MASK_ERRCODE) >> 16;
		if (err && chip->model <= PMAC_SCREAMER)
			snd_printk(KERN_DEBUG "error %x\n", err);
	}
	/* Writing 1s to the CNTLERR and PORTCHG bits clears them... */
	out_le32(&chip->awacs->control, ctrl);
	return IRQ_HANDLED;
}
/*
* a wrapper to feature call for compatibility
*/
/*
 * a wrapper to feature call for compatibility: power the sound chip
 * on/off through the platform feature interface, if one is registered
 */
static void snd_pmac_sound_feature(struct snd_pmac *chip, int enable)
{
	if (ppc_md.feature_call)
		ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable);
}
/*
* release resources
*/
/*
 * Release everything snd_pmac_new()/detect acquired: quiesce the
 * hardware (only if it was fully initialized), free mixer and beep,
 * then IRQs, DMA command areas, mappings, memory regions, and finally
 * the chip structure itself.  Always returns 0.
 */
static int snd_pmac_free(struct snd_pmac *chip)
{
	/* stop sounds */
	if (chip->initialized) {
		snd_pmac_dbdma_reset(chip);
		/* disable interrupts from awacs interface */
		out_le32(&chip->awacs->control, in_le32(&chip->awacs->control) & 0xfff);
	}

	if (chip->node)
		snd_pmac_sound_feature(chip, 0);

	/* clean up mixer if any */
	if (chip->mixer_free)
		chip->mixer_free(chip);
	snd_pmac_detach_beep(chip);

	/* release resources */
	if (chip->irq >= 0)
		free_irq(chip->irq, (void*)chip);
	if (chip->tx_irq >= 0)
		free_irq(chip->tx_irq, (void*)chip);
	if (chip->rx_irq >= 0)
		free_irq(chip->rx_irq, (void*)chip);
	snd_pmac_dbdma_free(chip, &chip->playback.cmd);
	snd_pmac_dbdma_free(chip, &chip->capture.cmd);
	snd_pmac_dbdma_free(chip, &chip->extra_dma);
	snd_pmac_dbdma_free(chip, &emergency_dbdma);
	if (chip->macio_base)
		iounmap(chip->macio_base);
	if (chip->latch_base)
		iounmap(chip->latch_base);
	if (chip->awacs)
		iounmap(chip->awacs);
	if (chip->playback.dma)
		iounmap(chip->playback.dma);
	if (chip->capture.dma)
		iounmap(chip->capture.dma);

	if (chip->node) {
		int i;
		/* rsrc[0..2] were claimed during detection; the 'requested'
		   bitmask records which ones */
		for (i = 0; i < 3; i++) {
			if (chip->requested & (1 << i))
				release_mem_region(chip->rsrc[i].start,
						   resource_size(&chip->rsrc[i]));
		}
	}

	if (chip->pdev)
		pci_dev_put(chip->pdev);
	of_node_put(chip->node);
	kfree(chip);
	return 0;
}
/*
* free the device
*/
/*
 * free the device: snd_device destructor hook delegating to
 * snd_pmac_free()
 */
static int snd_pmac_dev_free(struct snd_device *device)
{
	struct snd_pmac *chip = device->device_data;
	return snd_pmac_free(chip);
}
/*
* check the machine support byteswap (little-endian)
*/
/*
 * check the machine support byteswap (little-endian): clear
 * can_byte_swap (and for one model can_duplex) on hardware known not
 * to handle it
 */
static void __devinit detect_byte_swap(struct snd_pmac *chip)
{
	struct device_node *mio;

	/* if seems that Keylargo can't byte-swap */
	for (mio = chip->node->parent; mio; mio = mio->parent) {
		if (strcmp(mio->name, "mac-io") == 0) {
			if (of_device_is_compatible(mio, "Keylargo"))
				chip->can_byte_swap = 0;
			break;
		}
	}

	/* it seems the Pismo & iBook can't byte-swap in hardware. */
	if (of_machine_is_compatible("PowerBook3,1") ||
	    of_machine_is_compatible("PowerBook2,1"))
		chip->can_byte_swap = 0 ;

	if (of_machine_is_compatible("PowerBook2,1"))
		chip->can_duplex = 0;
}
/*
* detect a sound chip
*/
/*
 * Detect the sound chip from the OpenFirmware device tree: find the
 * awacs/davbus/i2s-a node and its "sound" child, classify the model
 * (awacs/screamer/burgundy/daca/tumbler/snapper), fill capability
 * flags and frequency tables, and locate the macio PCI device used for
 * DMA allocations.  Returns 0 on success, -ENODEV if no supported chip
 * (or a layout-id machine handled by snd-aoa) is found.
 */
static int __devinit snd_pmac_detect(struct snd_pmac *chip)
{
	struct device_node *sound;
	struct device_node *dn;
	const unsigned int *prop;
	unsigned int l;
	struct macio_chip* macio;

	if (!machine_is(powermac))
		return -ENODEV;

	/* optimistic defaults, narrowed per-model below */
	chip->subframe = 0;
	chip->revision = 0;
	chip->freqs_ok = 0xff; /* all ok */
	chip->model = PMAC_AWACS;
	chip->can_byte_swap = 1;
	chip->can_duplex = 1;
	chip->can_capture = 1;
	chip->num_freqs = ARRAY_SIZE(awacs_freqs);
	chip->freq_table = awacs_freqs;
	chip->pdev = NULL;

	chip->control_mask = MASK_IEPC | MASK_IEE | 0x11; /* default */

	/* check machine type */
	if (of_machine_is_compatible("AAPL,3400/2400")
	    || of_machine_is_compatible("AAPL,3500"))
		chip->is_pbook_3400 = 1;
	else if (of_machine_is_compatible("PowerBook1,1")
		 || of_machine_is_compatible("AAPL,PowerBook1998"))
		chip->is_pbook_G3 = 1;
	chip->node = of_find_node_by_name(NULL, "awacs");
	sound = of_node_get(chip->node);

	/*
	 * powermac G3 models have a node called "davbus"
	 * with a child called "sound".
	 */
	if (!chip->node)
		chip->node = of_find_node_by_name(NULL, "davbus");
	/*
	 * if we didn't find a davbus device, try 'i2s-a' since
	 * this seems to be what iBooks have
	 */
	if (! chip->node) {
		chip->node = of_find_node_by_name(NULL, "i2s-a");
		if (chip->node && chip->node->parent &&
		    chip->node->parent->parent) {
			if (of_device_is_compatible(chip->node->parent->parent,
						    "K2-Keylargo"))
				chip->is_k2 = 1;
		}
	}
	if (! chip->node)
		return -ENODEV;

	if (!sound) {
		/* look for a "sound" node that is a child of chip->node */
		sound = of_find_node_by_name(NULL, "sound");
		while (sound && sound->parent != chip->node)
			sound = of_find_node_by_name(sound, "sound");
	}

	if (! sound) {
		of_node_put(chip->node);
		chip->node = NULL;
		return -ENODEV;
	}
	prop = of_get_property(sound, "sub-frame", NULL);
	if (prop && *prop < 16)
		chip->subframe = *prop;
	prop = of_get_property(sound, "layout-id", NULL);
	if (prop) {
		/* partly deprecate snd-powermac, for those machines
		 * that have a layout-id property for now */
		printk(KERN_INFO "snd-powermac no longer handles any "
				 "machines with a layout-id property "
				 "in the device-tree, use snd-aoa.\n");
		of_node_put(sound);
		of_node_put(chip->node);
		chip->node = NULL;
		return -ENODEV;
	}
	/* This should be verified on older screamers */
	if (of_device_is_compatible(sound, "screamer")) {
		chip->model = PMAC_SCREAMER;
		// chip->can_byte_swap = 0; /* FIXME: check this */
	}
	if (of_device_is_compatible(sound, "burgundy")) {
		chip->model = PMAC_BURGUNDY;
		chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
	}
	if (of_device_is_compatible(sound, "daca")) {
		chip->model = PMAC_DACA;
		chip->can_capture = 0;  /* no capture */
		chip->can_duplex = 0;
		// chip->can_byte_swap = 0; /* FIXME: check this */
		chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
	}
	if (of_device_is_compatible(sound, "tumbler")) {
		chip->model = PMAC_TUMBLER;
		chip->can_capture = of_machine_is_compatible("PowerMac4,2")
				|| of_machine_is_compatible("PowerBook3,2")
				|| of_machine_is_compatible("PowerBook3,3")
				|| of_machine_is_compatible("PowerBook4,1")
				|| of_machine_is_compatible("PowerBook4,2")
				|| of_machine_is_compatible("PowerBook4,3");
		chip->can_duplex = 0;
		// chip->can_byte_swap = 0; /* FIXME: check this */
		chip->num_freqs = ARRAY_SIZE(tumbler_freqs);
		chip->freq_table = tumbler_freqs;
		chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
	}
	if (of_device_is_compatible(sound, "snapper")) {
		chip->model = PMAC_SNAPPER;
		// chip->can_byte_swap = 0; /* FIXME: check this */
		chip->num_freqs = ARRAY_SIZE(tumbler_freqs);
		chip->freq_table = tumbler_freqs;
		chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
	}
	prop = of_get_property(sound, "device-id", NULL);
	if (prop)
		chip->device_id = *prop;
	dn = of_find_node_by_name(NULL, "perch");
	chip->has_iic = (dn != NULL);
	of_node_put(dn);

	/* We need the PCI device for DMA allocations, let's use a crude method
	 * for now ...
	 */
	macio = macio_find(chip->node, macio_unknown);
	if (macio == NULL)
		printk(KERN_WARNING "snd-powermac: can't locate macio !\n");
	else {
		struct pci_dev *pdev = NULL;

		for_each_pci_dev(pdev) {
			struct device_node *np = pci_device_to_OF_node(pdev);
			if (np && np == macio->of_node) {
				chip->pdev = pdev;
				break;
			}
		}
	}
	if (chip->pdev == NULL)
		printk(KERN_WARNING "snd-powermac: can't locate macio PCI"
		       " device !\n");

	detect_byte_swap(chip);

	/* look for a property saying what sample rates
	   are available */
	prop = of_get_property(sound, "sample-rates", &l);
	if (! prop)
		prop = of_get_property(sound, "output-frame-rates", &l);
	if (prop) {
		int i;
		chip->freqs_ok = 0;
		for (l /= sizeof(int); l > 0; --l) {
			unsigned int r = *prop++;
			/* Apple 'Fixed' format */
			if (r >= 0x10000)
				r >>= 16;
			for (i = 0; i < chip->num_freqs; ++i) {
				if (r == chip->freq_table[i]) {
					chip->freqs_ok |= (1 << i);
					break;
				}
			}
		}
	} else {
		/* assume only 44.1khz */
		chip->freqs_ok = 1;
	}

	of_node_put(sound);
	return 0;
}
#ifdef PMAC_SUPPORT_AUTOMUTE
/*
* auto-mute
*/
/* Mixer callback: report the current "Auto Mute Switch" state. */
static int pmac_auto_mute_get(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	long *out = &ucontrol->value.integer.value[0];

	*out = chip->auto_mute;
	return 0;
}
/* Mixer callback: change the auto-mute mode.
 * Returns 1 when the value changed, 0 when it was already set. */
static int pmac_auto_mute_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	long val = ucontrol->value.integer.value[0];

	if (val == chip->auto_mute)
		return 0;	/* unchanged */

	chip->auto_mute = !!val;
	if (chip->update_automute)
		chip->update_automute(chip, 1);
	return 1;
}
/* Mixer callback: report headphone presence via the model-specific
 * detect hook; 0 when the model provides no detection. */
static int pmac_hp_detect_get(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
	long detected = 0;

	if (chip->detect_headphone)
		detected = chip->detect_headphone(chip);
	ucontrol->value.integer.value[0] = detected;
	return 0;
}
/* Mixer controls for auto-mute: a writable on/off switch plus a
 * read-only headphone-presence indicator. */
static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = {
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	  .name = "Auto Mute Switch",
	  .info = snd_pmac_boolean_mono_info,
	  .get = pmac_auto_mute_get,
	  .put = pmac_auto_mute_put,
	},
	{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	  .name = "Headphone Detection",
	  .access = SNDRV_CTL_ELEM_ACCESS_READ,	/* status only, no put */
	  .info = snd_pmac_boolean_mono_info,
	  .get = pmac_hp_detect_get,
	},
};
/* Register both auto-mute mixer controls, keeping a handle to the
 * headphone-detect control in chip->hp_detect_ctl.  Auto-mute starts
 * enabled.  Returns 0 or a negative error from snd_ctl_add(). */
int __devinit snd_pmac_add_automute(struct snd_pmac *chip)
{
	struct snd_kcontrol *ctl;
	int err;

	chip->auto_mute = 1;

	ctl = snd_ctl_new1(&auto_mute_controls[0], chip);
	err = snd_ctl_add(chip->card, ctl);
	if (err < 0) {
		printk(KERN_ERR "snd-powermac: Failed to add automute control\n");
		return err;
	}

	chip->hp_detect_ctl = snd_ctl_new1(&auto_mute_controls[1], chip);
	return snd_ctl_add(chip->card, chip->hp_detect_ctl);
}
#endif /* PMAC_SUPPORT_AUTOMUTE */
/*
* create and detect a pmac chip record
*/
/*
 * snd_pmac_new - create and detect a pmac chip record
 * @card: ALSA card instance to attach the chip to
 * @chip_return: filled with the new chip instance on success
 *
 * Allocates the chip structure, probes the device tree, requests the
 * register resources, maps the AWACS control and DMA register ranges,
 * grabs the interrupts and applies PowerBook-specific input-enable
 * quirks.  On any failure the partially initialised chip is torn down
 * through snd_pmac_free() (via the error path).
 *
 * Returns 0 on success or a negative error code.
 */
int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
{
	struct snd_pmac *chip;
	struct device_node *np;
	int i, err;
	unsigned int irq;
	unsigned long ctrl_addr, txdma_addr, rxdma_addr;
	static struct snd_device_ops ops = {
		.dev_free = snd_pmac_dev_free,
	};

	*chip_return = NULL;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;
	chip->card = card;

	spin_lock_init(&chip->reg_lock);
	/* -1 means "not requested"; checked on the free/suspend paths */
	chip->irq = chip->tx_irq = chip->rx_irq = -1;

	chip->playback.stream = SNDRV_PCM_STREAM_PLAYBACK;
	chip->capture.stream = SNDRV_PCM_STREAM_CAPTURE;

	if ((err = snd_pmac_detect(chip)) < 0)
		goto __error;

	/* DBDMA command buffers: one per stream (fragments + stop cmd),
	 * plus two small scratch channels (extra/emergency). */
	if (snd_pmac_dbdma_alloc(chip, &chip->playback.cmd, PMAC_MAX_FRAGS + 1) < 0 ||
	    snd_pmac_dbdma_alloc(chip, &chip->capture.cmd, PMAC_MAX_FRAGS + 1) < 0 ||
	    snd_pmac_dbdma_alloc(chip, &chip->extra_dma, 2) < 0 ||
	    snd_pmac_dbdma_alloc(chip, &emergency_dbdma, 2) < 0) {
		err = -ENOMEM;
		goto __error;
	}

	np = chip->node;
	chip->requested = 0;
	if (chip->is_k2) {
		/* K2: control and DMA ranges live on the parent node;
		 * the Rx DMA block sits 0x100 past the Tx block. */
		static char *rnames[] = {
			"Sound Control", "Sound DMA" };
		for (i = 0; i < 2; i ++) {
			if (of_address_to_resource(np->parent, i,
						   &chip->rsrc[i])) {
				printk(KERN_ERR "snd: can't translate rsrc "
				       " %d (%s)\n", i, rnames[i]);
				err = -ENODEV;
				goto __error;
			}
			if (request_mem_region(chip->rsrc[i].start,
					       resource_size(&chip->rsrc[i]),
					       rnames[i]) == NULL) {
				printk(KERN_ERR "snd: can't request rsrc "
				       " %d (%s: %pR)\n",
				       i, rnames[i], &chip->rsrc[i]);
				err = -ENODEV;
				goto __error;
			}
			/* remember which regions to release on free */
			chip->requested |= (1 << i);
		}
		ctrl_addr = chip->rsrc[0].start;
		txdma_addr = chip->rsrc[1].start;
		rxdma_addr = txdma_addr + 0x100;
	} else {
		/* Non-K2: three separate resources on the sound node. */
		static char *rnames[] = {
			"Sound Control", "Sound Tx DMA", "Sound Rx DMA" };
		for (i = 0; i < 3; i ++) {
			if (of_address_to_resource(np, i,
						   &chip->rsrc[i])) {
				printk(KERN_ERR "snd: can't translate rsrc "
				       " %d (%s)\n", i, rnames[i]);
				err = -ENODEV;
				goto __error;
			}
			if (request_mem_region(chip->rsrc[i].start,
					       resource_size(&chip->rsrc[i]),
					       rnames[i]) == NULL) {
				printk(KERN_ERR "snd: can't request rsrc "
				       " %d (%s: %pR)\n",
				       i, rnames[i], &chip->rsrc[i]);
				err = -ENODEV;
				goto __error;
			}
			chip->requested |= (1 << i);
		}
		ctrl_addr = chip->rsrc[0].start;
		txdma_addr = chip->rsrc[1].start;
		rxdma_addr = chip->rsrc[2].start;
	}

	chip->awacs = ioremap(ctrl_addr, 0x1000);
	chip->playback.dma = ioremap(txdma_addr, 0x100);
	chip->capture.dma = ioremap(rxdma_addr, 0x100);
	/* Only the older models (up to burgundy) have a control IRQ. */
	if (chip->model <= PMAC_BURGUNDY) {
		irq = irq_of_parse_and_map(np, 0);
		if (request_irq(irq, snd_pmac_ctrl_intr, 0,
				"PMac", (void*)chip)) {
			snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n",
				   irq);
			err = -EBUSY;
			goto __error;
		}
		chip->irq = irq;
	}
	irq = irq_of_parse_and_map(np, 1);
	if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){
		snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
		err = -EBUSY;
		goto __error;
	}
	chip->tx_irq = irq;
	irq = irq_of_parse_and_map(np, 2);
	if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) {
		snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
		err = -EBUSY;
		goto __error;
	}
	chip->rx_irq = irq;

	snd_pmac_sound_feature(chip, 1);

	/* reset & enable interrupts */
	if (chip->model <= PMAC_BURGUNDY)
		out_le32(&chip->awacs->control, chip->control_mask);

	/* Powerbooks have odd ways of enabling inputs such as
	   an expansion-bay CD or sound from an internal modem
	   or a PC-card modem. */
	if (chip->is_pbook_3400) {
		/* Enable CD and PC-card sound inputs. */
		/* This is done by reading from address
		 * f301a000, + 0x10 to enable the expansion-bay
		 * CD sound input, + 0x80 to enable the PC-card
		 * sound input.  The 0x100 enables the SCSI bus
		 * terminator power.
		 */
		chip->latch_base = ioremap (0xf301a000, 0x1000);
		in_8(chip->latch_base + 0x190);
	} else if (chip->is_pbook_G3) {
		struct device_node* mio;
		/* walk up to the mac-io node to map its registers */
		for (mio = chip->node->parent; mio; mio = mio->parent) {
			if (strcmp(mio->name, "mac-io") == 0) {
				struct resource r;
				if (of_address_to_resource(mio, 0, &r) == 0)
					chip->macio_base =
						ioremap(r.start, 0x40);
				break;
			}
		}
		/* Enable CD sound input. */
		/* The relevant bits for writing to this byte are 0x8f.
		 * I haven't found out what the 0x80 bit does.
		 * For the 0xf bits, writing 3 or 7 enables the CD
		 * input, any other value disables it.  Values
		 * 1, 3, 5, 7 enable the microphone.  Values 0, 2,
		 * 4, 6, 8 - f enable the input from the modem.
		 */
		if (chip->macio_base)
			out_8(chip->macio_base + 0x37, 3);
	}

	/* Reset dbdma channels */
	snd_pmac_dbdma_reset(chip);

	/* register the chip as an ALSA low-level device so that
	 * snd_pmac_dev_free() runs when the card is released */
	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
		goto __error;

	*chip_return = chip;
	return 0;

 __error:
	snd_pmac_free(chip);
	return err;
}
/*
* sleep notify for powerbook
*/
#ifdef CONFIG_PM
/*
* Save state when going to sleep, restore it afterwards.
*/
/*
 * Suspend: notify the power layer, run the codec-specific suspend
 * hook, suspend all PCM streams, stop any beep in progress, mask the
 * chip interrupts, and finally power down the sound hardware.
 */
void snd_pmac_suspend(struct snd_pmac *chip)
{
	unsigned long flags;

	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
	if (chip->suspend)
		chip->suspend(chip);
	snd_pcm_suspend_all(chip->pcm);
	spin_lock_irqsave(&chip->reg_lock, flags);
	snd_pmac_beep_stop(chip);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	/* irq fields are -1 when the corresponding IRQ was never grabbed */
	if (chip->irq >= 0)
		disable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		disable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		disable_irq(chip->rx_irq);
	snd_pmac_sound_feature(chip, 0);
}
/*
 * Resume: mirror of snd_pmac_suspend() — power the hardware back up,
 * run the codec resume hook, redo the PowerBook input-enable quirks,
 * restore the PCM format, unmask interrupts and tell the power layer
 * we are back in D0.
 */
void snd_pmac_resume(struct snd_pmac *chip)
{
	snd_pmac_sound_feature(chip, 1);
	if (chip->resume)
		chip->resume(chip);
	/* enable CD sound input (same magic as in snd_pmac_new) */
	if (chip->macio_base && chip->is_pbook_G3)
		out_8(chip->macio_base + 0x37, 3);
	else if (chip->is_pbook_3400)
		in_8(chip->latch_base + 0x190);

	snd_pmac_pcm_set_format(chip);

	if (chip->irq >= 0)
		enable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		enable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		enable_irq(chip->rx_irq);

	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
}
#endif /* CONFIG_PM */
| gpl-2.0 |
acuicultor/android_kernel_oneplus_msm8974 | drivers/tty/serial/clps711x.c | 6411 | 13324 | /*
* Driver for CLPS711x serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/hardware/clps7111.h>
#define UART_NR 2
#define SERIAL_CLPS711X_MAJOR 204
#define SERIAL_CLPS711X_MINOR 40
#define SERIAL_CLPS711X_NR UART_NR
/*
* We use the relevant SYSCON register as a base address for these ports.
*/
#define UBRLCR(port) ((port)->iobase + UBRLCR1 - SYSCON1)
#define UARTDR(port) ((port)->iobase + UARTDR1 - SYSCON1)
#define SYSFLG(port) ((port)->iobase + SYSFLG1 - SYSCON1)
#define SYSCON(port) ((port)->iobase + SYSCON1 - SYSCON1)
#define TX_IRQ(port) ((port)->irq)
#define RX_IRQ(port) ((port)->irq + 1)
#define UART_ANY_ERR (UARTDR_FRMERR | UARTDR_PARERR | UARTDR_OVERR)
#define tx_enabled(port) ((port)->unused[0])
/* Stop transmitting by masking the TX interrupt; tx_enabled tracks
 * the mask state so the IRQ is never disabled twice. */
static void clps711xuart_stop_tx(struct uart_port *port)
{
	if (!tx_enabled(port))
		return;
	disable_irq(TX_IRQ(port));
	tx_enabled(port) = 0;
}
/* (Re)start transmission by unmasking the TX interrupt, guarded by
 * tx_enabled so a running port is left untouched. */
static void clps711xuart_start_tx(struct uart_port *port)
{
	if (tx_enabled(port))
		return;
	enable_irq(TX_IRQ(port));
	tx_enabled(port) = 1;
}
/* Stop receiving: mask the RX interrupt. */
static void clps711xuart_stop_rx(struct uart_port *port)
{
	disable_irq(RX_IRQ(port));
}
/* Modem-status change interrupts are not implemented here; no-op. */
static void clps711xuart_enable_ms(struct uart_port *port)
{
}
/*
 * Receive interrupt handler: drain the RX FIFO into the tty layer.
 * The data register read returns the error flags for each character
 * in its upper bits (see UART_ANY_ERR).
 */
static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	struct tty_struct *tty = port->state->port.tty;
	unsigned int status, ch, flg;

	status = clps_readl(SYSFLG(port));
	/* loop until the "RX FIFO empty" flag comes back on */
	while (!(status & SYSFLG_URXFE)) {
		ch = clps_readl(UARTDR(port));

		port->icount.rx++;

		flg = TTY_NORMAL;

		/*
		 * Note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(ch & UART_ANY_ERR)) {
			if (ch & UARTDR_PARERR)
				port->icount.parity++;
			else if (ch & UARTDR_FRMERR)
				port->icount.frame++;
			if (ch & UARTDR_OVERR)
				port->icount.overrun++;

			/* strip error bits the user asked us to ignore */
			ch &= port->read_status_mask;

			if (ch & UARTDR_PARERR)
				flg = TTY_PARITY;
			else if (ch & UARTDR_FRMERR)
				flg = TTY_FRAME;

#ifdef SUPPORT_SYSRQ
			port->sysrq = 0;
#endif
		}

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		/*
		 * CHECK: does overrun affect the current character?
		 * ASSUMPTION: it does not.
		 */
		uart_insert_char(port, ch, UARTDR_OVERR, ch, flg);

	ignore_char:
		status = clps_readl(SYSFLG(port));
	}
	tty_flip_buffer_push(tty);
	return IRQ_HANDLED;
}
/*
 * Transmit interrupt handler: refill the TX FIFO from the circular
 * buffer, any pending x_char (flow control) first.  The TX interrupt
 * is masked again when there is nothing left to send.
 */
static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	struct circ_buf *xmit = &port->state->xmit;
	int count;

	if (port->x_char) {
		clps_writel(port->x_char, UARTDR(port));
		port->icount.tx++;
		port->x_char = 0;
		return IRQ_HANDLED;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		goto disable_tx_irq;

	/* fill at most half the FIFO per interrupt */
	count = port->fifosize >> 1;
	do {
		clps_writel(xmit->buf[xmit->tail], UARTDR(port));
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
	disable_tx_irq:
		/* _nosync: we may be running inside this very handler */
		disable_irq_nosync(TX_IRQ(port));
		tx_enabled(port) = 0;
	}

	return IRQ_HANDLED;
}
/* Report whether the transmitter has fully drained (FIFO and shift
 * register); the core polls this during port shutdown. */
static unsigned int clps711xuart_tx_empty(struct uart_port *port)
{
	if (clps_readl(SYSFLG(port)) & SYSFLG_UBUSY)
		return 0;
	return TIOCSER_TEMT;
}
/* Read the modem status lines.  Only UART1 has DCD/DSR/CTS wired up;
 * the second port always reports all lines inactive. */
static unsigned int clps711xuart_get_mctrl(struct uart_port *port)
{
	unsigned int result = 0;

	if (SYSFLG(port) == SYSFLG1) {
		unsigned int status = clps_readl(SYSFLG1);

		if (status & SYSFLG1_DCD)
			result |= TIOCM_CAR;
		if (status & SYSFLG1_DSR)
			result |= TIOCM_DSR;
		if (status & SYSFLG1_CTS)
			result |= TIOCM_CTS;
	}

	return result;
}
/* The modem control outputs cannot be driven on this UART; no-op. */
static void
clps711xuart_set_mctrl_null(struct uart_port *port, unsigned int mctrl)
{
}
/* Assert (break_state == -1) or clear the break condition via the
 * UBRLCR break bit, under the port lock. */
static void clps711xuart_break_ctl(struct uart_port *port, int break_state)
{
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&port->lock, flags);

	val = clps_readl(UBRLCR(port));
	if (break_state == -1)
		val |= UBRLCR_BREAK;
	else
		val &= ~UBRLCR_BREAK;
	clps_writel(val, UBRLCR(port));

	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * Open the port: claim the TX and RX interrupts (RX IRQ is TX IRQ + 1,
 * see the TX_IRQ/RX_IRQ macros) and enable the UART.  The TX IRQ is
 * freed again if the RX request fails.
 */
static int clps711xuart_startup(struct uart_port *port)
{
	unsigned int syscon;
	int retval;

	tx_enabled(port) = 1;

	/*
	 * Allocate the IRQs
	 */
	retval = request_irq(TX_IRQ(port), clps711xuart_int_tx, 0,
			     "clps711xuart_tx", port);
	if (retval)
		return retval;

	retval = request_irq(RX_IRQ(port), clps711xuart_int_rx, 0,
			     "clps711xuart_rx", port);
	if (retval) {
		free_irq(TX_IRQ(port), port);
		return retval;
	}

	/*
	 * enable the port
	 */
	syscon = clps_readl(SYSCON(port));
	syscon |= SYSCON_UARTEN;
	clps_writel(syscon, SYSCON(port));

	return 0;
}
/*
 * Close the port: release both IRQs, disable the UART, then clear
 * any break condition and the FIFO enable bit.
 */
static void clps711xuart_shutdown(struct uart_port *port)
{
	unsigned int ubrlcr, syscon;

	/*
	 * Free the interrupt
	 */
	free_irq(TX_IRQ(port), port);	/* TX interrupt */
	free_irq(RX_IRQ(port), port);	/* RX interrupt */

	/*
	 * disable the port
	 */
	syscon = clps_readl(SYSCON(port));
	syscon &= ~SYSCON_UARTEN;
	clps_writel(syscon, SYSCON(port));

	/*
	 * disable break condition and fifos
	 */
	ubrlcr = clps_readl(UBRLCR(port));
	ubrlcr &= ~(UBRLCR_FIFOEN | UBRLCR_BREAK);
	clps_writel(ubrlcr, UBRLCR(port));
}
/*
 * Apply termios settings: word length, stop bits, parity and baud
 * rate.  Line-control bits and the baud divisor share the single
 * UBRLCR register, which is written last under the port lock.
 */
static void
clps711xuart_set_termios(struct uart_port *port, struct ktermios *termios,
			 struct ktermios *old)
{
	unsigned int ubrlcr, baud, quot;
	unsigned long flags;

	/*
	 * We don't implement CREAD.
	 */
	termios->c_cflag |= CREAD;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		ubrlcr = UBRLCR_WRDLEN5;
		break;
	case CS6:
		ubrlcr = UBRLCR_WRDLEN6;
		break;
	case CS7:
		ubrlcr = UBRLCR_WRDLEN7;
		break;
	default: // CS8
		ubrlcr = UBRLCR_WRDLEN8;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		ubrlcr |= UBRLCR_XSTOP;
	if (termios->c_cflag & PARENB) {
		ubrlcr |= UBRLCR_PRTEN;
		if (!(termios->c_cflag & PARODD))
			ubrlcr |= UBRLCR_EVENPRT;
	}
	/* enable the FIFO on ports that have one */
	if (port->fifosize > 1)
		ubrlcr |= UBRLCR_FIFOEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/* error bits the RX handler should report to the tty layer */
	port->read_status_mask = UARTDR_OVERR;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UARTDR_PARERR | UARTDR_FRMERR;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UARTDR_FRMERR | UARTDR_PARERR;
	if (termios->c_iflag & IGNBRK) {
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns to (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UARTDR_OVERR;
	}

	/* the UBRLCR baud field wants divisor - 1 */
	quot -= 1;

	clps_writel(ubrlcr | quot, UBRLCR(port));

	spin_unlock_irqrestore(&port->lock, flags);
}
/* Human-readable port type, or NULL if the port is not ours. */
static const char *clps711xuart_type(struct uart_port *port)
{
	if (port->type == PORT_CLPS711X)
		return "CLPS711x";
	return NULL;
}
/*
* Configure/autoconfigure the port.
*/
/* Claim the port type during autoconfiguration; no probing needed. */
static void clps711xuart_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_CLPS711X;
}
/* Nothing to release: the ports use fixed on-chip register space. */
static void clps711xuart_release_port(struct uart_port *port)
{
}
/* Always succeeds: fixed on-chip register space needs no claiming. */
static int clps711xuart_request_port(struct uart_port *port)
{
	return 0;
}
/* uart_ops hooked into the serial core for both CLPS711x ports. */
static struct uart_ops clps711x_pops = {
	.tx_empty	= clps711xuart_tx_empty,
	.set_mctrl	= clps711xuart_set_mctrl_null,	/* outputs not driveable */
	.get_mctrl	= clps711xuart_get_mctrl,
	.stop_tx	= clps711xuart_stop_tx,
	.start_tx	= clps711xuart_start_tx,
	.stop_rx	= clps711xuart_stop_rx,
	.enable_ms	= clps711xuart_enable_ms,	/* no-op */
	.break_ctl	= clps711xuart_break_ctl,
	.startup	= clps711xuart_startup,
	.shutdown	= clps711xuart_shutdown,
	.set_termios	= clps711xuart_set_termios,
	.type		= clps711xuart_type,
	.config_port	= clps711xuart_config_port,
	.release_port	= clps711xuart_release_port,
	.request_port	= clps711xuart_request_port,
};
/*
 * The two on-chip UARTs.  iobase holds the SYSCON register address,
 * from which the other register addresses are derived (see the
 * UBRLCR/UARTDR/SYSFLG/SYSCON macros above).  irq is the TX interrupt;
 * the RX interrupt is irq + 1 (see TX_IRQ/RX_IRQ).
 */
static struct uart_port clps711x_ports[UART_NR] = {
	{
		.iobase		= SYSCON1,
		.irq		= IRQ_UTXINT1,	/* IRQ_URXINT1, IRQ_UMSINT */
		.uartclk	= 3686400,
		.fifosize	= 16,
		.ops		= &clps711x_pops,
		.line		= 0,
		.flags		= UPF_BOOT_AUTOCONF,
	},
	{
		.iobase		= SYSCON2,
		.irq		= IRQ_UTXINT2,	/* IRQ_URXINT2 */
		.uartclk	= 3686400,
		.fifosize	= 16,
		.ops		= &clps711x_pops,
		.line		= 1,
		.flags		= UPF_BOOT_AUTOCONF,
	}
};
#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
/* Busy-wait until the TX FIFO has room, then queue one character. */
static void clps711xuart_console_putchar(struct uart_port *port, int ch)
{
	while (clps_readl(SYSFLG(port)) & SYSFLG_UTXFF)
		barrier();
	clps_writel(ch, UARTDR(port));
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*
* Note that this is called with interrupts already disabled
*/
static void
clps711xuart_console_write(struct console *co, const char *s,
			   unsigned int count)
{
	struct uart_port *port = clps711x_ports + co->index;
	unsigned int status, syscon;

	/*
	 * Ensure that the port is enabled.
	 */
	syscon = clps_readl(SYSCON(port));
	clps_writel(syscon | SYSCON_UARTEN, SYSCON(port));

	uart_console_write(port, s, count, clps711xuart_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the uart state.
	 */
	do {
		status = clps_readl(SYSFLG(port));
	} while (status & SYSFLG_UBUSY);

	/* restore the original enable state */
	clps_writel(syscon, SYSCON(port));
}
/*
 * If the boot loader left the UART enabled, recover baud rate, parity
 * and word length from the current UBRLCR settings; otherwise leave
 * the caller's defaults untouched.
 */
static void __init
clps711xuart_console_get_options(struct uart_port *port, int *baud,
				 int *parity, int *bits)
{
	if (clps_readl(SYSCON(port)) & SYSCON_UARTEN) {
		unsigned int ubrlcr, quot;

		ubrlcr = clps_readl(UBRLCR(port));

		*parity = 'n';
		if (ubrlcr & UBRLCR_PRTEN) {
			if (ubrlcr & UBRLCR_EVENPRT)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7)
			*bits = 7;
		else
			*bits = 8;

		/* hardware stores divisor - 1 */
		quot = ubrlcr & UBRLCR_BAUD_MASK;
		*baud = port->uartclk / (16 * (quot + 1));
	}
}
/*
 * Console setup: pick the requested (or first usable) port, then take
 * line settings from the console= options if given, else read back
 * what the boot loader programmed into the hardware.
 */
static int __init clps711xuart_console_setup(struct console *co, char *options)
{
	struct uart_port *port;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	port = uart_get_console(clps711x_ports, UART_NR, co);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		clps711xuart_console_get_options(port, &baud, &parity, &bits);

	return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver clps711x_reg;
/* Console descriptor; index -1 lets the core pick the port. */
static struct console clps711x_console = {
	.name		= "ttyCL",
	.write		= clps711xuart_console_write,
	.device		= uart_console_device,
	.setup		= clps711xuart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &clps711x_reg,
};
/* Registered via console_initcall so the console comes up early,
 * before the driver itself is initialised. */
static int __init clps711xuart_console_init(void)
{
	register_console(&clps711x_console);
	return 0;
}
#define CLPS711X_CONSOLE &clps711x_console
#else
#define CLPS711X_CONSOLE NULL
#endif
/* Driver descriptor: /dev/ttyCL*, major 204 / minor 40, two ports. */
static struct uart_driver clps711x_reg = {
	.driver_name		= "ttyCL",
	.dev_name		= "ttyCL",
	.major			= SERIAL_CLPS711X_MAJOR,
	.minor			= SERIAL_CLPS711X_MINOR,
	.nr			= UART_NR,

	.cons			= CLPS711X_CONSOLE,
};
/*
 * Register the driver and add both on-chip UART ports.
 *
 * Returns 0 on success, or the error from uart_register_driver().
 * Previously the return value of uart_add_one_port() was silently
 * discarded; a failure is now reported, but does not abort module
 * init so that any remaining working port stays usable.
 */
static int __init clps711xuart_init(void)
{
	int ret, i;

	printk(KERN_INFO "Serial: CLPS711x driver\n");

	ret = uart_register_driver(&clps711x_reg);
	if (ret)
		return ret;

	for (i = 0; i < UART_NR; i++) {
		ret = uart_add_one_port(&clps711x_reg, &clps711x_ports[i]);
		if (ret)
			printk(KERN_WARNING
			       "clps711x: failed to add port %d: %d\n",
			       i, ret);
	}

	return 0;
}
/* Tear down in reverse: remove the ports, then drop the driver. */
static void __exit clps711xuart_exit(void)
{
	int i;

	for (i = UART_NR - 1; i >= 0; i--)
		uart_remove_one_port(&clps711x_reg, &clps711x_ports[i]);
	uart_unregister_driver(&clps711x_reg);
}
module_init(clps711xuart_init);
module_exit(clps711xuart_exit);
MODULE_AUTHOR("Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("CLPS-711x generic serial driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(SERIAL_CLPS711X_MAJOR, SERIAL_CLPS711X_MINOR);
| gpl-2.0 |
jmztaylor/android_kernel_htc_k2plccl | arch/mips/powertv/powertv_setup.c | 7691 | 9628 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
* Portions copyright (C) 2009 Cisco Systems, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/screen_info.h>
#include <linux/notifier.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/time.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
#include <asm/dma.h>
#include <asm/asm.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include "reset.h"
#define VAL(n) STR(n)
/*
* Macros for loading addresses and storing registers:
* LONG_L_ Stringified version of LONG_L for use in asm() statement
* LONG_S_ Stringified version of LONG_S for use in asm() statement
* PTR_LA_ Stringified version of PTR_LA for use in asm() statement
* REG_SIZE Number of 8-bit bytes in a full width register
*/
#define LONG_L_ VAL(LONG_L) " "
#define LONG_S_ VAL(LONG_S) " "
#define PTR_LA_ VAL(PTR_LA) " "
#ifdef CONFIG_64BIT
#warning TODO: 64-bit code needs to be verified
#define REG_SIZE "8" /* In bytes */
#endif
#ifdef CONFIG_32BIT
#define REG_SIZE "4" /* In bytes */
#endif
static void register_panic_notifier(void);
static int panic_handler(struct notifier_block *notifier_block,
unsigned long event, void *cause_string);
/* Identify this board family to the generic MIPS code. */
const char *get_system_type(void)
{
	static const char sys_type[] = "PowerTV";

	return sys_type;
}
/*
 * Platform setup: make any oops fatal so the panic notifier
 * (registered here) always gets a chance to dump state, and hook
 * up the platform reboot handler.
 */
void __init plat_mem_setup(void)
{
	panic_on_oops = 1;
	register_panic_notifier();

#if 0
	mips_pcibios_init();
#endif

	mips_reboot_setup();
}
/*
* Install a panic notifier for platform-specific diagnostics
*/
/*
 * Install a panic notifier for platform-specific diagnostics.
 *
 * Registered with maximum priority so panic_handler() runs before any
 * other panic callback.  The definition previously used a K&R-style
 * empty parameter list "()", which leaves the arguments unspecified in
 * C; it now matches the "(void)" forward declaration.
 */
static void register_panic_notifier(void)
{
	static struct notifier_block panic_notifier = {
		.notifier_call = panic_handler,
		.next = NULL,
		.priority	= INT_MAX
	};

	atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
}
/*
 * Panic notifier callback: capture a best-effort snapshot of the CPU
 * registers into a local pt_regs for post-mortem inspection, then
 * print a message.  Runs at INT_MAX priority, i.e. before any other
 * panic callback.
 */
static int panic_handler(struct notifier_block *notifier_block,
			 unsigned long event, void *cause_string)
{
	struct pt_regs my_regs;

	/* Save all of the registers */
	{
		unsigned long at, v0, v1; /* Must be on the stack */

		/* Start by saving $at and v0 on the stack. We use $at
		 * ourselves, but it looks like the compiler may use v0 or v1
		 * to load the address of the pt_regs structure. We'll come
		 * back later to store the registers in the pt_regs
		 * structure. */
		__asm__ __volatile__ (
			".set noat\n"
			LONG_S_ "$at, %[at]\n"
			LONG_S_ "$2, %[v0]\n"
			LONG_S_ "$3, %[v1]\n"
		:
			[at] "=m" (at),
			[v0] "=m" (v0),
			[v1] "=m" (v1)
		:
		: "at"
		);

		/* Spill every general-purpose register into my_regs,
		 * addressing the structure through $at. */
		__asm__ __volatile__ (
			".set noat\n"
			"move $at, %[pt_regs]\n"

			/* Argument registers */
			LONG_S_ "$4, " VAL(PT_R4) "($at)\n"
			LONG_S_ "$5, " VAL(PT_R5) "($at)\n"
			LONG_S_ "$6, " VAL(PT_R6) "($at)\n"
			LONG_S_ "$7, " VAL(PT_R7) "($at)\n"

			/* Temporary regs */
			LONG_S_ "$8, " VAL(PT_R8) "($at)\n"
			LONG_S_ "$9, " VAL(PT_R9) "($at)\n"
			LONG_S_ "$10, " VAL(PT_R10) "($at)\n"
			LONG_S_ "$11, " VAL(PT_R11) "($at)\n"
			LONG_S_ "$12, " VAL(PT_R12) "($at)\n"
			LONG_S_ "$13, " VAL(PT_R13) "($at)\n"
			LONG_S_ "$14, " VAL(PT_R14) "($at)\n"
			LONG_S_ "$15, " VAL(PT_R15) "($at)\n"

			/* "Saved" registers */
			LONG_S_ "$16, " VAL(PT_R16) "($at)\n"
			LONG_S_ "$17, " VAL(PT_R17) "($at)\n"
			LONG_S_ "$18, " VAL(PT_R18) "($at)\n"
			LONG_S_ "$19, " VAL(PT_R19) "($at)\n"
			LONG_S_ "$20, " VAL(PT_R20) "($at)\n"
			LONG_S_ "$21, " VAL(PT_R21) "($at)\n"
			LONG_S_ "$22, " VAL(PT_R22) "($at)\n"
			LONG_S_ "$23, " VAL(PT_R23) "($at)\n"

			/* Add'l temp regs */
			LONG_S_ "$24, " VAL(PT_R24) "($at)\n"
			LONG_S_ "$25, " VAL(PT_R25) "($at)\n"

			/* Kernel temp regs */
			LONG_S_ "$26, " VAL(PT_R26) "($at)\n"
			LONG_S_ "$27, " VAL(PT_R27) "($at)\n"

			/* Global pointer, stack pointer, frame pointer and
			 * return address */
			LONG_S_ "$gp, " VAL(PT_R28) "($at)\n"
			LONG_S_ "$sp, " VAL(PT_R29) "($at)\n"
			LONG_S_ "$fp, " VAL(PT_R30) "($at)\n"
			LONG_S_ "$ra, " VAL(PT_R31) "($at)\n"

			/* Now we can get the $at and v0 registers back and
			 * store them */
			LONG_L_ "$8, %[at]\n"
			LONG_S_ "$8, " VAL(PT_R1) "($at)\n"
			LONG_L_ "$8, %[v0]\n"
			LONG_S_ "$8, " VAL(PT_R2) "($at)\n"
			LONG_L_ "$8, %[v1]\n"
			LONG_S_ "$8, " VAL(PT_R3) "($at)\n"
		:
		:
			[at] "m" (at),
			[v0] "m" (v0),
			[v1] "m" (v1),
			[pt_regs] "r" (&my_regs)
		: "at", "t0"
		);

		/* Set the current EPC value to be the current location in this
		 * function */
		__asm__ __volatile__ (
			".set noat\n"
			"1:\n"
			PTR_LA_ "$at, 1b\n"
			LONG_S_ "$at, %[cp0_epc]\n"
		:
			[cp0_epc] "=m" (my_regs.cp0_epc)
		:
		: "at"
		);

		my_regs.cp0_cause = read_c0_cause();
		my_regs.cp0_status = read_c0_status();
	}

	pr_crit("I'm feeling a bit sleepy. hmmmmm... perhaps a nap would... "
		"zzzz... \n");

	return NOTIFY_DONE;
}
/* Information about the RF MAC address, if one was supplied on the
* command line. */
static bool have_rfmac;
static u8 rfmac[ETH_ALEN];
/*
 * Parse the "rfmac=" kernel parameter: an optionally "0x"-prefixed
 * run of hex digits forming the six-byte RF MAC address.  have_rfmac
 * is set only when exactly ETH_ALEN bytes were parsed and the whole
 * value was consumed; otherwise the parameter is ignored.
 */
static int rfmac_param(char *p)
{
	u8 *q;
	bool is_high_nibble;
	int c;

	/* Skip a leading "0x", if present */
	if (*p == '0' && *(p+1) == 'x')
		p += 2;

	q = rfmac;
	is_high_nibble = true;

	for (c = (unsigned char) *p++;
	     isxdigit(c) && q - rfmac < ETH_ALEN;
	     c = (unsigned char) *p++) {
		int nibble;

		nibble = (isdigit(c) ? (c - '0') :
			  (isupper(c) ? c - 'A' + 10 : c - 'a' + 10));

		/* pack two hex digits per output byte, high nibble first */
		if (is_high_nibble)
			*q = nibble << 4;
		else
			*q++ |= nibble;

		is_high_nibble = !is_high_nibble;
	}

	/* If we parsed all the way to the end of the parameter value and
	 * parsed all ETH_ALEN bytes, we have a usable RF MAC address */
	have_rfmac = (c == '\0' && q - rfmac == ETH_ALEN);

	return 0;
}

early_param("rfmac", rfmac_param);
/*
* Generate an Ethernet MAC address that has a good chance of being unique.
* @addr: Pointer to six-byte array containing the Ethernet address
* Generates an Ethernet MAC address that is highly likely to be unique for
* this particular system on a network with other systems of the same type.
*
* The problem we are solving is that, when random_ether_addr() is used to
* generate MAC addresses at startup, there isn't much entropy for the random
* number generator to use and the addresses it produces are fairly likely to
* be the same as those of other identical systems on the same local network.
* This is true even for relatively small numbers of systems (for the reason
* why, see the Wikipedia entry for "Birthday problem" at:
* http://en.wikipedia.org/wiki/Birthday_problem
*
* The good news is that we already have a MAC address known to be unique, the
* RF MAC address. The bad news is that this address is already in use on the
* RF interface. Worse, the obvious trick, taking the RF MAC address and
* turning on the locally managed bit, has already been used for other devices.
* Still, this does give us something to work with.
*
* The approach we take is:
* 1. If we can't get the RF MAC Address, just call random_ether_addr.
* 2. Use the 24-bit NIC-specific bits of the RF MAC address as the last 24
* bits of the new address. This is very likely to be unique, except for
* the current box.
* 3. To avoid using addresses already on the current box, we set the top
* six bits of the address with a value different from any currently
* registered Scientific Atlanta organizationally unique identifyer
* (OUI). This avoids duplication with any addresses on the system that
* were generated from valid Scientific Atlanta-registered address by
* simply flipping the locally managed bit.
* 4. We aren't generating a multicast address, so we leave the multicast
* bit off. Since we aren't using a registered address, we have to set
* the locally managed bit.
* 5. We then randomly generate the remaining 16-bits. This does two
* things:
* a. It allows us to call this function for more than one device
* in this system
* b. It ensures that things will probably still work even if
* some device on the device network has a locally managed
* address that matches the top six bits from step 2.
*/
/* Build a likely-unique MAC address from the RF MAC (see the long
 * explanation above); fall back to a fully random one when no rfmac=
 * parameter was supplied. */
void platform_random_ether_addr(u8 addr[ETH_ALEN])
{
	const int num_random_bytes = 2;
	const unsigned char non_sciatl_oui_bits = 0xc0u;
	const unsigned char mac_addr_locally_managed = (1 << 1);
	int i;

	if (!have_rfmac) {
		pr_warning("rfmac not available on command line; "
			   "generating random MAC address\n");
		random_ether_addr(addr);
		return;
	}

	/* First byte: not a Scientific Atlanta OUI, locally managed,
	 * and not a multicast address. */
	addr[0] = non_sciatl_oui_bits | mac_addr_locally_managed;

	/* Middle bytes: random, so several devices on this box differ. */
	get_random_bytes(&addr[1], num_random_bytes);

	/* Remaining bytes: the NIC-specific bits of the RF MAC address. */
	for (i = 1 + num_random_bytes; i < ETH_ALEN; i++)
		addr[i] = rfmac[i];
}
| gpl-2.0 |
MechanicalAndroids/android_kernel_motorola_msm8226 | drivers/tty/serial/8250/8250_pnp.c | 7947 | 14620 | /*
* Probe module for 8250/16550-type ISAPNP serial ports.
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
*
* Ported to the Linux PnP Layer - (C) Adam Belay.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include "8250.h"
#define UNKNOWN_DEV 0x3000
/*
 * PnP IDs claimed by this driver.  Mostly ISAPnP modems, plus a number
 * of touchscreen/tablet devices that expose a 16550-compatible UART.
 * Entries whose driver_data carries UNKNOWN_DEV are wildcard classes
 * that must additionally pass serial_pnp_guess_board() at probe time.
 *
 * FIX: removed the duplicated "BRI0A49" and "BDP3336" entries that
 * appeared twice in the table; the PnP core matches on the first hit,
 * so the second copies were dead weight.
 */
static const struct pnp_device_id pnp_dev_table[] = {
	/* Archtek America Corp. */
	/* Archtek SmartLink Modem 3334BT Plug & Play */
	{ "AAC000F", 0 },
	/* Anchor Datacomm BV */
	/* SXPro 144 External Data Fax Modem Plug & Play */
	{ "ADC0001", 0 },
	/* SXPro 288 External Data Fax Modem Plug & Play */
	{ "ADC0002", 0 },
	/* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
	{ "AEI0250", 0 },
	/* Actiontec ISA PNP 56K X2 Fax Modem */
	{ "AEI1240", 0 },
	/* Rockwell 56K ACF II Fax+Data+Voice Modem */
	{ "AKY1021", 0 /*SPCI_FL_NO_SHIRQ*/ },
	/* AZT3005 PnP SOUND DEVICE */
	{ "AZT4001", 0 },
	/* Best Data Products Inc. Smart One 336F PnP Modem */
	{ "BDP3336", 0 },
	/* Boca Research */
	/* Boca Complete Ofc Communicator 14.4 Data-FAX */
	{ "BRI0A49", 0 },
	/* Boca Research 33,600 ACF Modem */
	{ "BRI1400", 0 },
	/* Boca 33.6 Kbps Internal FD34FSVD */
	{ "BRI3400", 0 },
	/* Computer Peripherals Inc */
	/* EuroViVa CommCenter-33.6 SP PnP */
	{ "CPI4050", 0 },
	/* Creative Labs */
	/* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
	{ "CTL3001", 0 },
	/* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
	{ "CTL3011", 0 },
	/* Davicom ISA 33.6K Modem */
	{ "DAV0336", 0 },
	/* Creative */
	/* Creative Modem Blaster Flash56 DI5601-1 */
	{ "DMB1032", 0 },
	/* Creative Modem Blaster V.90 DI5660 */
	{ "DMB2001", 0 },
	/* E-Tech */
	/* E-Tech CyberBULLET PC56RVP */
	{ "ETT0002", 0 },
	/* FUJITSU */
	/* Fujitsu 33600 PnP-I2 R Plug & Play */
	{ "FUJ0202", 0 },
	/* Fujitsu FMV-FX431 Plug & Play */
	{ "FUJ0205", 0 },
	/* Fujitsu 33600 PnP-I4 R Plug & Play */
	{ "FUJ0206", 0 },
	/* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */
	{ "FUJ0209", 0 },
	/* Archtek America Corp. */
	/* Archtek SmartLink Modem 3334BT Plug & Play */
	{ "GVC000F", 0 },
	/* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */
	{ "GVC0303", 0 },
	/* Hayes */
	/* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
	{ "HAY0001", 0 },
	/* Hayes Optima 336 V.34 + FAX + Voice PnP */
	{ "HAY000C", 0 },
	/* Hayes Optima 336B V.34 + FAX + Voice PnP */
	{ "HAY000D", 0 },
	/* Hayes Accura 56K Ext Fax Modem PnP */
	{ "HAY5670", 0 },
	/* Hayes Accura 56K Ext Fax Modem PnP */
	{ "HAY5674", 0 },
	/* Hayes Accura 56K Fax Modem PnP */
	{ "HAY5675", 0 },
	/* Hayes 288, V.34 + FAX */
	{ "HAYF000", 0 },
	/* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */
	{ "HAYF001", 0 },
	/* IBM */
	/* IBM Thinkpad 701 Internal Modem Voice */
	{ "IBM0033", 0 },
	/* Intermec */
	/* Intermec CV60 touchscreen port */
	{ "PNP4972", 0 },
	/* Intertex */
	/* Intertex 28k8 33k6 Voice EXT PnP */
	{ "IXDC801", 0 },
	/* Intertex 33k6 56k Voice EXT PnP */
	{ "IXDC901", 0 },
	/* Intertex 28k8 33k6 Voice SP EXT PnP */
	{ "IXDD801", 0 },
	/* Intertex 33k6 56k Voice SP EXT PnP */
	{ "IXDD901", 0 },
	/* Intertex 28k8 33k6 Voice SP INT PnP */
	{ "IXDF401", 0 },
	/* Intertex 28k8 33k6 Voice SP EXT PnP */
	{ "IXDF801", 0 },
	/* Intertex 33k6 56k Voice SP EXT PnP */
	{ "IXDF901", 0 },
	/* Kortex International */
	/* KORTEX 28800 Externe PnP */
	{ "KOR4522", 0 },
	/* KXPro 33.6 Vocal ASVD PnP */
	{ "KORF661", 0 },
	/* Lasat */
	/* LASAT Internet 33600 PnP */
	{ "LAS4040", 0 },
	/* Lasat Safire 560 PnP */
	{ "LAS4540", 0 },
	/* Lasat Safire 336  PnP */
	{ "LAS5440", 0 },
	/* Microcom, Inc. */
	/* Microcom TravelPorte FAST V.34 Plug & Play */
	{ "MNP0281", 0 },
	/* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */
	{ "MNP0336", 0 },
	/* Microcom DeskPorte FAST EP 28.8 Plug & Play */
	{ "MNP0339", 0 },
	/* Microcom DeskPorte 28.8P Plug & Play */
	{ "MNP0342", 0 },
	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
	{ "MNP0500", 0 },
	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
	{ "MNP0501", 0 },
	/* Microcom DeskPorte 28.8S Internal Plug & Play */
	{ "MNP0502", 0 },
	/* Motorola */
	/* Motorola BitSURFR Plug & Play */
	{ "MOT1105", 0 },
	/* Motorola TA210 Plug & Play */
	{ "MOT1111", 0 },
	/* Motorola HMTA 200 (ISDN) Plug & Play */
	{ "MOT1114", 0 },
	/* Motorola BitSURFR Plug & Play */
	{ "MOT1115", 0 },
	/* Motorola Lifestyle 28.8 Internal */
	{ "MOT1190", 0 },
	/* Motorola V.3400 Plug & Play */
	{ "MOT1501", 0 },
	/* Motorola Lifestyle 28.8 V.34 Plug & Play */
	{ "MOT1502", 0 },
	/* Motorola Power 28.8 V.34 Plug & Play */
	{ "MOT1505", 0 },
	/* Motorola ModemSURFR External 28.8 Plug & Play */
	{ "MOT1509", 0 },
	/* Motorola Premier 33.6 Desktop Plug & Play */
	{ "MOT150A", 0 },
	/* Motorola VoiceSURFR 56K External PnP */
	{ "MOT150F", 0 },
	/* Motorola ModemSURFR 56K External PnP */
	{ "MOT1510", 0 },
	/* Motorola ModemSURFR 56K Internal PnP */
	{ "MOT1550", 0 },
	/* Motorola ModemSURFR Internal 28.8 Plug & Play */
	{ "MOT1560", 0 },
	/* Motorola Premier 33.6 Internal Plug & Play */
	{ "MOT1580", 0 },
	/* Motorola OnlineSURFR 28.8 Internal Plug & Play */
	{ "MOT15B0", 0 },
	/* Motorola VoiceSURFR 56K Internal PnP */
	{ "MOT15F0", 0 },
	/* Com 1 */
	/* Deskline K56 Phone System PnP */
	{ "MVX00A1", 0 },
	/* PC Rider K56 Phone System PnP */
	{ "MVX00F2", 0 },
	/* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */
	{ "nEC8241", 0 },
	/* Pace 56 Voice Internal Plug & Play Modem */
	{ "PMC2430", 0 },
	/* Generic */
	/* Generic standard PC COM port */
	{ "PNP0500", 0 },
	/* Generic 16550A-compatible COM port */
	{ "PNP0501", 0 },
	/* Compaq 14400 Modem */
	{ "PNPC000", 0 },
	/* Compaq 2400/9600 Modem */
	{ "PNPC001", 0 },
	/* Dial-Up Networking Serial Cable between 2 PCs */
	{ "PNPC031", 0 },
	/* Dial-Up Networking Parallel Cable between 2 PCs */
	{ "PNPC032", 0 },
	/* Standard 9600 bps Modem */
	{ "PNPC100", 0 },
	/* Standard 14400 bps Modem */
	{ "PNPC101", 0 },
	/* Standard 28800 bps Modem*/
	{ "PNPC102", 0 },
	/* Standard Modem*/
	{ "PNPC103", 0 },
	/* Standard 9600 bps Modem*/
	{ "PNPC104", 0 },
	/* Standard 14400 bps Modem*/
	{ "PNPC105", 0 },
	/* Standard 28800 bps Modem*/
	{ "PNPC106", 0 },
	/* Standard Modem */
	{ "PNPC107", 0 },
	/* Standard 9600 bps Modem */
	{ "PNPC108", 0 },
	/* Standard 14400 bps Modem */
	{ "PNPC109", 0 },
	/* Standard 28800 bps Modem */
	{ "PNPC10A", 0 },
	/* Standard Modem */
	{ "PNPC10B", 0 },
	/* Standard 9600 bps Modem */
	{ "PNPC10C", 0 },
	/* Standard 14400 bps Modem */
	{ "PNPC10D", 0 },
	/* Standard 28800 bps Modem */
	{ "PNPC10E", 0 },
	/* Standard Modem */
	{ "PNPC10F", 0 },
	/* Standard PCMCIA Card Modem */
	{ "PNP2000", 0 },
	/* Rockwell */
	/* Modular Technology */
	/* Rockwell 33.6 DPF Internal PnP */
	/* Modular Technology 33.6 Internal PnP */
	{ "ROK0030", 0 },
	/* Kortex International */
	/* KORTEX 14400 Externe PnP */
	{ "ROK0100", 0 },
	/* Rockwell 28.8 */
	{ "ROK4120", 0 },
	/* Viking Components, Inc */
	/* Viking 28.8 INTERNAL Fax+Data+Voice PnP */
	{ "ROK4920", 0 },
	/* Rockwell */
	/* British Telecom */
	/* Modular Technology */
	/* Rockwell 33.6 DPF External PnP */
	/* BT Prologue 33.6 External PnP */
	/* Modular Technology 33.6 External PnP */
	{ "RSS00A0", 0 },
	/* Viking 56K FAX INT */
	{ "RSS0262", 0 },
	/* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */
	{ "RSS0250", 0 },
	/* SupraExpress 28.8 Data/Fax PnP modem */
	{ "SUP1310", 0 },
	/* SupraExpress 336i PnP Voice Modem */
	{ "SUP1381", 0 },
	/* SupraExpress 33.6 Data/Fax PnP modem */
	{ "SUP1421", 0 },
	/* SupraExpress 33.6 Data/Fax PnP modem */
	{ "SUP1590", 0 },
	/* SupraExpress 336i Sp ASVD */
	{ "SUP1620", 0 },
	/* SupraExpress 33.6 Data/Fax PnP modem */
	{ "SUP1760", 0 },
	/* SupraExpress 56i Sp Intl */
	{ "SUP2171", 0 },
	/* Phoebe Micro */
	/* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */
	{ "TEX0011", 0 },
	/* Archtek America Corp. */
	/* Archtek SmartLink Modem 3334BT Plug & Play */
	{ "UAC000F", 0 },
	/* 3Com Corp. */
	/* Gateway Telepath IIvi 33.6 */
	{ "USR0000", 0 },
	/* U.S. Robotics Sporster 33.6K Fax INT PnP */
	{ "USR0002", 0 },
	/* Sportster Vi 14.4 PnP FAX Voicemail */
	{ "USR0004", 0 },
	/* U.S. Robotics 33.6K Voice INT PnP */
	{ "USR0006", 0 },
	/* U.S. Robotics 33.6K Voice EXT PnP */
	{ "USR0007", 0 },
	/* U.S. Robotics Courier V.Everything INT PnP */
	{ "USR0009", 0 },
	/* U.S. Robotics 33.6K Voice INT PnP */
	{ "USR2002", 0 },
	/* U.S. Robotics 56K Voice INT PnP */
	{ "USR2070", 0 },
	/* U.S. Robotics 56K Voice EXT PnP */
	{ "USR2080", 0 },
	/* U.S. Robotics 56K FAX INT */
	{ "USR3031", 0 },
	/* U.S. Robotics 56K FAX INT */
	{ "USR3050", 0 },
	/* U.S. Robotics 56K Voice INT PnP */
	{ "USR3070", 0 },
	/* U.S. Robotics 56K Voice EXT PnP */
	{ "USR3080", 0 },
	/* U.S. Robotics 56K Voice INT PnP */
	{ "USR3090", 0 },
	/* U.S. Robotics 56K Message  */
	{ "USR9100", 0 },
	/* U.S. Robotics 56K FAX EXT PnP*/
	{ "USR9160", 0 },
	/* U.S. Robotics 56K FAX INT PnP*/
	{ "USR9170", 0 },
	/* U.S. Robotics 56K Voice EXT PnP*/
	{ "USR9180", 0 },
	/* U.S. Robotics 56K Voice INT PnP*/
	{ "USR9190", 0 },
	/* Wacom tablets */
	{ "WACFXXX", 0 },
	/* Compaq touchscreen */
	{ "FPI2002", 0 },
	/* Fujitsu Stylistic touchscreens */
	{ "FUJ02B2", 0 },
	{ "FUJ02B3", 0 },
	/* Fujitsu Stylistic LT touchscreens */
	{ "FUJ02B4", 0 },
	/* Passive Fujitsu Stylistic touchscreens */
	{ "FUJ02B6", 0 },
	{ "FUJ02B7", 0 },
	{ "FUJ02B8", 0 },
	{ "FUJ02B9", 0 },
	{ "FUJ02BC", 0 },
	/* Fujitsu Wacom Tablet PC device */
	{ "FUJ02E5", 0 },
	/* Fujitsu P-series tablet PC device */
	{ "FUJ02E6", 0 },
	/* Fujitsu Wacom 2FGT Tablet PC device */
	{ "FUJ02E7", 0 },
	/* Fujitsu Wacom 1FGT Tablet PC device */
	{ "FUJ02E9", 0 },
	/*
	 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
	 * disguise)
	 */
	{ "LTS0001", 0 },
	/* Rockwell's (PORALiNK) 33600 INT PNP */
	{ "WCI0003", 0 },
	/* Unknown PnP modems */
	{ "PNPCXXX", UNKNOWN_DEV },
	/* More unknown PnP modems */
	{ "PNPDXXX", UNKNOWN_DEV },
	{ "", 0 }
};
MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
/*
 * Substrings that, when found in a PnP device or card name, suggest the
 * device is a modem (consumed by check_name()).  NULL-terminated.
 */
static char *modem_names[] __devinitdata = {
	"MODEM", "Modem", "modem", "FAX", "Fax", "fax",
	"56K", "56k", "K56", "33.6", "28.8", "14.4",
	"33,600", "28,800", "14,400", "33.600", "28.800", "14.400",
	"33600", "28800", "14400", "V.90", "V.34", "V.32", NULL
};
/* Return 1 when @name contains any of the modem_names[] substrings. */
static int __devinit check_name(char *name)
{
	char **candidate = modem_names;

	while (*candidate != NULL) {
		if (strstr(name, *candidate))
			return 1;
		candidate++;
	}
	return 0;
}
/*
 * Return 1 when the device can be configured at one of the four legacy
 * 8-byte COM-port I/O bases, 0 otherwise.
 */
static int __devinit check_resources(struct pnp_dev *dev)
{
	static const resource_size_t com_bases[] = {
		0x2f8, 0x3f8, 0x2e8, 0x3e8
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(com_bases); i++)
		if (pnp_possible_config(dev, IORESOURCE_IO, com_bases[i], 8))
			return 1;

	return 0;
}
/*
* Given a complete unknown PnP device, try to use some heuristics to
* detect modems. Currently use such heuristic set:
* - dev->name or dev->bus->name must contain "modem" substring;
* - device must have only one IO region (8 byte long) with base address
* 0x2e8, 0x3e8, 0x2f8 or 0x3f8.
*
* Such detection looks very ugly, but can detect at least some of numerous
* PnP modems, alternatively we must hardcode all modems in pnp_devices[]
* table.
*/
/*
 * Heuristic modem detection for wildcard PnP IDs: the device (or its
 * card) name must look like a modem and it must be configurable at a
 * standard COM-port base (see the comment above).  Returns 0 when the
 * device is accepted, -ENODEV otherwise.
 */
static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags)
{
	int name_ok = check_name(pnp_dev_name(dev)) ||
		      (dev->card && check_name(dev->card->name));

	if (!name_ok)
		return -ENODEV;

	return check_resources(dev) ? 0 : -ENODEV;
}
/*
 * serial_pnp_probe - bind a PnP device and register it as an 8250 port.
 *
 * Wildcard modem IDs (UNKNOWN_DEV) are first vetted by
 * serial_pnp_guess_board().  The registered line number is stored in
 * drvdata as line+1 so that 0 can mean "no port registered".
 */
static int __devinit
serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
	struct uart_port port;
	int ret, line, flags = dev_id->driver_data;

	if (flags & UNKNOWN_DEV) {
		ret = serial_pnp_guess_board(dev, &flags);
		if (ret < 0)
			return ret;
	}

	memset(&port, 0, sizeof(struct uart_port));
	if (pnp_irq_valid(dev, 0))
		port.irq = pnp_irq(dev, 0);
	/* Prefer an I/O port resource; fall back to memory-mapped. */
	if (pnp_port_valid(dev, 0)) {
		port.iobase = pnp_port_start(dev, 0);
		port.iotype = UPIO_PORT;
	} else if (pnp_mem_valid(dev, 0)) {
		port.mapbase = pnp_mem_start(dev, 0);
		port.iotype = UPIO_MEM;
		port.flags = UPF_IOREMAP;
	} else
		return -ENODEV;

#ifdef SERIAL_DEBUG_PNP
	printk(KERN_DEBUG
		"Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
	       port.iobase, port.mapbase, port.irq, port.iotype);
#endif

	/* PnP already identified the device, so skip the probe test. */
	port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
	if (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE)
		port.flags |= UPF_SHARE_IRQ;
	port.uartclk = 1843200;	/* 1.8432 MHz, the classic PC UART clock */
	port.dev = &dev->dev;

	line = serial8250_register_port(&port);
	if (line < 0)
		return -ENODEV;

	/* store line+1: 0 is reserved for "nothing registered" */
	pnp_set_drvdata(dev, (void *)((long)line + 1));
	return 0;
}
/* Undo serial_pnp_probe(): unregister the uart line, if one exists. */
static void __devexit serial_pnp_remove(struct pnp_dev *dev)
{
	long cookie = (long)pnp_get_drvdata(dev);

	/* drvdata holds line+1; zero means probe registered nothing */
	if (!cookie)
		return;
	serial8250_unregister_port(cookie - 1);
}
#ifdef CONFIG_PM
/* Suspend the uart line registered for this PnP device, if any. */
static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state)
{
	long cookie = (long)pnp_get_drvdata(dev);

	if (!cookie)
		return -ENODEV;

	serial8250_suspend_port(cookie - 1);	/* drvdata stores line+1 */
	return 0;
}
/* Resume the uart line registered for this PnP device, if any. */
static int serial_pnp_resume(struct pnp_dev *dev)
{
	long cookie = (long)pnp_get_drvdata(dev);

	if (!cookie)
		return -ENODEV;

	serial8250_resume_port(cookie - 1);	/* drvdata stores line+1 */
	return 0;
}
#else
#define serial_pnp_suspend NULL
#define serial_pnp_resume NULL
#endif /* CONFIG_PM */
/* PnP driver glue: matches pnp_dev_table and routes the callbacks. */
static struct pnp_driver serial_pnp_driver = {
	.name		= "serial",
	.probe		= serial_pnp_probe,
	.remove		= __devexit_p(serial_pnp_remove),
	.suspend	= serial_pnp_suspend,	/* NULL when !CONFIG_PM */
	.resume		= serial_pnp_resume,	/* NULL when !CONFIG_PM */
	.id_table	= pnp_dev_table,
};
/* Module entry point: register the driver with the PnP core. */
static int __init serial8250_pnp_init(void)
{
	return pnp_register_driver(&serial_pnp_driver);
}
/* Module exit point: unregister the driver from the PnP core. */
static void __exit serial8250_pnp_exit(void)
{
	pnp_unregister_driver(&serial_pnp_driver);
}
module_init(serial8250_pnp_init);
module_exit(serial8250_pnp_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250/16x50 PnP serial driver");
| gpl-2.0 |
jmztaylor/android_kernel_htc_k2plccl | drivers/media/video/saa7164/saa7164-i2c.c | 8203 | 3666 | /*
* Driver for the NXP SAA7164 PCIe bridge
*
* Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "saa7164.h"
/*
 * i2c_xfer - master_xfer hook for the saa7164 i2c adapter.
 *
 * Standalone read messages are not implemented (logged and skipped).
 * A write immediately followed by a read from the same address is
 * collapsed into a single saa7164_api_i2c_read() transaction.  Returns
 * the number of messages on success, or a negative error code.
 */
static int i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
{
	struct saa7164_i2c *bus = i2c_adap->algo_data;
	struct saa7164_dev *dev = bus->dev;
	int i, retval = 0;

	dprintk(DBGLVL_I2C, "%s(num = %d)\n", __func__, num);

	for (i = 0 ; i < num; i++) {
		dprintk(DBGLVL_I2C, "%s(num = %d) addr = 0x%02x  len = 0x%x\n",
			__func__, num, msgs[i].addr, msgs[i].len);
		if (msgs[i].flags & I2C_M_RD) {
			/* Unsupported - Yet*/
			printk(KERN_ERR "%s() Unsupported - Yet\n", __func__);
			continue;
		} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
			   msgs[i].addr == msgs[i + 1].addr) {
			/* write then read from same address */
			retval = saa7164_api_i2c_read(bus, msgs[i].addr,
						      msgs[i].len, msgs[i].buf,
						      msgs[i+1].len, msgs[i+1].buf
						      );
			/* the read message was consumed too */
			i++;
			if (retval < 0)
				goto err;
		} else {
			/* write */
			retval = saa7164_api_i2c_write(bus, msgs[i].addr,
						       msgs[i].len, msgs[i].buf);
		}
		if (retval < 0)
			goto err;
	}
	return num;

err:
	return retval;
}
/*
 * Forward @cmd/@arg to every client on @bus, but only when the bus
 * registered successfully (i2c_rc == 0).
 */
void saa7164_call_i2c_clients(struct saa7164_i2c *bus, unsigned int cmd,
			      void *arg)
{
	if (bus->i2c_rc)
		return;

	i2c_clients_command(&bus->i2c_adap, cmd, arg);
}
/* Advertise plain I2C transfers only (no SMBus emulation). */
static u32 saa7164_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}
/* Algorithm template copied per-bus in saa7164_i2c_register(). */
static struct i2c_algorithm saa7164_i2c_algo_template = {
	.master_xfer	= i2c_xfer,
	.functionality	= saa7164_functionality,
};
/* ----------------------------------------------------------------------- */

/* Adapter template copied per-bus in saa7164_i2c_register(). */
static struct i2c_adapter saa7164_i2c_adap_template = {
	.name		= "saa7164",
	.owner		= THIS_MODULE,
	.algo		= &saa7164_i2c_algo_template,
};
/* Client template copied per-bus in saa7164_i2c_register(). */
static struct i2c_client saa7164_i2c_client_template = {
	.name	= "saa7164 internal",
};
/*
 * saa7164_i2c_register - populate and register one i2c bus.
 * @bus: per-bus state embedded in the saa7164 device structure
 *
 * Copies the adapter/algorithm/client templates into @bus, wires up the
 * back-pointers and registers the adapter with the i2c core.  Returns 0
 * on success or the i2c core's error code.
 */
int saa7164_i2c_register(struct saa7164_i2c *bus)
{
	struct saa7164_dev *dev = bus->dev;

	dprintk(DBGLVL_I2C, "%s(bus = %d)\n", __func__, bus->nr);

	memcpy(&bus->i2c_adap, &saa7164_i2c_adap_template,
	       sizeof(bus->i2c_adap));
	memcpy(&bus->i2c_algo, &saa7164_i2c_algo_template,
	       sizeof(bus->i2c_algo));
	memcpy(&bus->i2c_client, &saa7164_i2c_client_template,
	       sizeof(bus->i2c_client));

	bus->i2c_adap.dev.parent = &dev->pci->dev;
	strlcpy(bus->i2c_adap.name, bus->dev->name,
		sizeof(bus->i2c_adap.name));

	bus->i2c_algo.data = bus;
	bus->i2c_adap.algo_data = bus;
	i2c_set_adapdata(&bus->i2c_adap, bus);

	/* BUGFIX: capture the registration result.  The return value of
	 * i2c_add_adapter() used to be discarded, leaving i2c_rc (tested
	 * below and in saa7164_call_i2c_clients()) never assigned here. */
	bus->i2c_rc = i2c_add_adapter(&bus->i2c_adap);

	bus->i2c_client.adapter = &bus->i2c_adap;

	if (0 != bus->i2c_rc)
		printk(KERN_ERR "%s: i2c bus %d register FAILED\n",
		       dev->name, bus->nr);

	return bus->i2c_rc;
}
/* Tear down the adapter registered by saa7164_i2c_register(). */
int saa7164_i2c_unregister(struct saa7164_i2c *bus)
{
	i2c_del_adapter(&bus->i2c_adap);
	return 0;
}
| gpl-2.0 |
cleaton/android_kernel_samsung_smdk4210 | drivers/gpu/drm/ttm/ttm_module.c | 8459 | 2974 | /**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
* Jerome Glisse
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
#include "ttm/ttm_module.h"
#include "drm_sysfs.h"
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
atomic_t device_released;
/* Device type for the singleton "ttm" sysfs class device. */
static struct device_type ttm_drm_class_type = {
	.name = "ttm",
	/**
	 * Add pm ops here.
	 */
};
/*
 * Release callback for the ttm class device: flag the device as gone
 * and wake ttm_exit(), which blocks on exit_q before module unload.
 */
static void ttm_drm_class_device_release(struct device *dev)
{
	atomic_set(&device_released, 1);
	wake_up_all(&exit_q);
}
/* The singleton class device anchoring TTM's sysfs hierarchy. */
static struct device ttm_drm_class_device = {
	.type = &ttm_drm_class_type,
	.release = &ttm_drm_class_device_release
};
/* Return the kobject of the ttm class device (never NULL). */
struct kobject *ttm_get_kobj(void)
{
	struct kobject *result = &ttm_drm_class_device.kobj;

	BUG_ON(result == NULL);
	return result;
}
/*
 * Name and register the sysfs "ttm" class device that anchors TTM's
 * sysfs entries (see ttm_get_kobj()).
 */
static int __init ttm_init(void)
{
	int ret;

	ret = dev_set_name(&ttm_drm_class_device, "ttm");
	if (unlikely(ret != 0))
		return ret;

	/* arm the unload gate before the device can ever be released */
	atomic_set(&device_released, 0);
	ret = drm_class_device_register(&ttm_drm_class_device);
	if (unlikely(ret != 0))
		goto out_no_dev_reg;
	return 0;

out_no_dev_reg:
	/* registration failed, so the release callback will never run;
	 * mark the device released ourselves to keep ttm_exit() sane */
	atomic_set(&device_released, 1);
	wake_up_all(&exit_q);
	return ret;
}
/* Unregister the class device and wait for its release callback. */
static void __exit ttm_exit(void)
{
	drm_class_device_unregister(&ttm_drm_class_device);
	/**
	 * Refuse to unload until the TTM device is released.
	 * Not sure this is 100% needed.
	 */
	wait_event(exit_q, atomic_read(&device_released) == 1);
}
module_init(ttm_init);
module_exit(ttm_exit);
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
| gpl-2.0 |
Team-M8/android_kernel_htc_msm8974-staging | net/ipv4/netfilter/arp_tables.c | 12 | 47584 | /*
* Packet matching code for ARP packets.
*
* Based heavily, if not almost entirely, upon ip_tables.c framework.
*
* Some ARP specific bits are:
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables core");
/*#define DEBUG_ARP_TABLES*/
/*#define DEBUG_ARP_TABLES_USER*/
#ifdef DEBUG_ARP_TABLES
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif
#ifdef DEBUG_ARP_TABLES_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
#define ARP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define ARP_NF_ASSERT(x)
#endif
/*
 * Allocate the bootstrap replacement blob for an arptable.  The bare
 * arpt/ARPT tokens are pasted by the xt_alloc_initial_table() macro
 * into the per-family structure and constant names.
 */
void *arpt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
/*
 * Compare a hardware address from the packet against the rule's
 * address/mask pair, capped at ARPT_DEV_ADDR_LEN_MAX bytes.
 * Returns 0 on match, non-zero otherwise.
 */
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i;
	int diff = 0;

	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	for (i = 0; i < len; i++)
		diff |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return diff != 0;
}
/*
 * Masked interface-name comparison over IFNAMSIZ bytes; returns 0 when
 * (_a ^ _b) & _mask is zero everywhere, i.e. the names match.
 *
 * Unfortunately, _b and _mask are not aligned to an int (or long int).
 * Some arches don't care; unrolling the loop is a win on them.
 * For other arches, we only have a 16-bit alignment.
 */
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	int i;

	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}
/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

/* Apply the rule's inversion flag to the boolean test result. */
#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))

	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
		  ARPT_INV_ARPOP)) {
		dprintf("ARP operation field mismatch.\n");
		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
		  ARPT_INV_ARPHRD)) {
		dprintf("ARP hardware address format mismatch.\n");
		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
		  ARPT_INV_ARPPRO)) {
		dprintf("ARP protocol address format mismatch.\n");
		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
		  ARPT_INV_ARPHLN)) {
		dprintf("ARP hardware address length mismatch.\n");
		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
		return 0;
	}

	/* Walk the ARP payload: sender hw addr, sender IP, target hw
	 * addr, target IP, using the device's address length. */
	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
		  ARPT_INV_SRCDEVADDR) ||
	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
		  ARPT_INV_TGTDEVADDR)) {
		dprintf("Source or target device address mismatch.\n");
		return 0;
	}

	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
		  ARPT_INV_SRCIP) ||
	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
		  ARPT_INV_TGTIP)) {
		dprintf("Source or target IP address mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&src_ipaddr,
			&arpinfo->smsk.s_addr,
			&arpinfo->src.s_addr,
			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&tgt_ipaddr,
			&arpinfo->tmsk.s_addr,
			&arpinfo->tgt.s_addr,
			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches.  */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, arpinfo->iniface,
			arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, arpinfo->outiface,
			arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	return 1;
#undef FWINV
}
/*
 * Reject rules carrying flag or inversion-flag bits outside the
 * supported masks.  Returns 1 when the match part is acceptable.
 */
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	unsigned int bad_flags = arp->flags & ~ARPT_F_MASK;
	unsigned int bad_invflags = arp->invflags & ~ARPT_INV_MASK;

	if (bad_flags) {
		duprintf("Unknown flag bits set: %08X\n", bad_flags);
		return 0;
	}
	if (bad_invflags) {
		duprintf("Unknown invflag bits set: %08X\n", bad_invflags);
		return 0;
	}

	return 1;
}
/*
 * Target handler for the built-in ERROR target: log (rate limited)
 * and drop.  Hitting this means a broken ruleset made it past checks.
 */
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_err("arp_tables: error: '%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}
/* const-qualified wrapper around arpt_get_target(). */
static inline const struct xt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
	return arpt_get_target((struct arpt_entry *)e);
}
/* Resolve a byte offset within the rule blob to an entry pointer. */
static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}
/* Advance to the entry immediately following @entry in the blob. */
static inline __pure
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
/*
 * arpt_do_table - run one ARP packet through a table's ruleset.
 * @skb:   the packet
 * @hook:  netfilter hook number we were invoked from
 * @in/@out: ingress/egress devices (either may be NULL)
 * @table: the arptable to traverse
 *
 * Walks the per-CPU copy of the rule blob starting at the hook's entry
 * point, following jumps and RETURNs via a back-pointer chain stored in
 * the entries' comefrom fields, until a target yields a verdict.
 */
unsigned int arpt_do_table(struct sk_buff *skb,
			   unsigned int hook,
			   const struct net_device *in,
			   const struct net_device *out,
			   struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	struct arpt_entry *e, *back;
	const char *indev, *outdev;
	void *table_base;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;

	/* seqcount write section protects the counters we bump below */
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);
	back = get_entry(table_base, private->underflow[hook]);

	acpar.in      = in;
	acpar.out     = out;
	acpar.hooknum = hook;
	acpar.family  = NFPROTO_ARP;
	acpar.hotdrop = false;

	arp = arp_hdr(skb);
	do {
		const struct xt_entry_target *t;

		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			e = arpt_next_entry(e);
			continue;
		}

		ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);

		t = arpt_get_target_c(e);

		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v
			    != arpt_next_entry(e)) {
				/* Save old back ptr in next entry */
				struct arpt_entry *next = arpt_next_entry(e);
				next->comefrom = (void *)back - table_base;

				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		 * abs. verdicts
		 */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;
		verdict = t->u.kernel.target->target(skb, &acpar);

		/* Target might have changed stuff. */
		arp = arp_hdr(skb);

		if (verdict == XT_CONTINUE)
			e = arpt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}
/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_entry *e)
{
static const struct arpt_arp uncond;
return e->target_offset == sizeof(struct arpt_entry) &&
memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}
/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.
 *
 * Performs an iterative depth-first walk from every valid hook entry
 * point, temporarily borrowing counters.pcnt as a back-pointer stack
 * (restored to 0 on unwind) and comefrom as the visited/hook bitmask.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e
			= (struct arpt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)arpt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* bit NF_ARP_NUMHOOKS marks "currently on the
			 * DFS path": seeing it again means a loop */
			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
				pr_notice("arptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct arpt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct arpt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}

					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
/*
 * Basic sanity checks on a rule copied from userspace: valid match
 * flags, and a target header/body that fits inside the entry's
 * declared size (target_offset..next_offset).
 */
static inline int check_entry(const struct arpt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!arp_checkentry(&e->arp)) {
		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* the target header itself must fit before next_offset */
	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
		return -EINVAL;

	t = arpt_get_target_c(e);
	/* ...and so must the full user-declared target size */
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
/*
 * Run the x_tables target checkentry hook for an entry whose target
 * has already been resolved (t->u.kernel.target is set).
 */
static inline int check_target(struct arpt_entry *e, const char *name)
{
	struct xt_entry_target *t = arpt_get_target(e);
	int ret;
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_ARP,
	};

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		duprintf("arp_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
/*
 * Validate one entry end-to-end: sanity-check it, look up (and pin a
 * module reference on) its target, then run the target's own check.
 * On failure after the lookup, the module reference is dropped.
 */
static inline int
find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	t = arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;
	return 0;
err:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}
/* An underflow (base-chain policy) entry must be an unconditional rule
 * whose target is the STANDARD target with an absolute ACCEPT or DROP
 * verdict.  Standard verdicts are stored as -(NF_verdict) - 1, so the
 * transform below recovers the NF_* constant for comparison.
 */
static bool check_underflow(const struct arpt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = arpt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	/* decode: stored verdict is -real_verdict - 1 */
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
/* Validate one entry's alignment and size bounds while walking the
 * user-supplied blob, and record matching hook entry points and
 * underflows in @newinfo.  Nothing inside @e can be trusted yet.
 *
 * Returns 0 on success, -EINVAL on a malformed entry.
 */
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;

	/* The entry must be properly aligned and a full struct arpt_entry
	 * must fit before @limit.  The third test is the fix: it rejects a
	 * crafted next_offset that would point past the end of the blob and
	 * make the entry walk read out of bounds.  It is safe to read
	 * e->next_offset here only because the second test (short-circuit)
	 * already guaranteed the whole header is inside the blob.
	 */
	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must cover at least the entry plus a target header */
	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static inline void cleanup_entry(struct arpt_entry *e)
{
struct xt_tgdtor_param par;
struct xt_entry_target *t;
t = arpt_get_target(e);
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_ARP;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
}
/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 *
 * Two passes: a structural pass (sizes, offsets, hook/underflow
 * placement) followed by a semantic pass (target lookup + checkentry).
 * On success every entry holds a module reference that cleanup_entry()
 * must later release, and each possible CPU has its own copy of the
 * translated blob.
 */
static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct arpt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			break;
		++i;
		/* ERROR targets delimit user-defined chains; their count
		 * sizes the jump stack used at packet-processing time.
		 */
		if (strcmp(arpt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	/* The blob must contain exactly as many entries as claimed */
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	/* Reject rulesets whose jump graph contains loops */
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind: only the first i entries passed find_check_entry()
		 * and hold module references that must be dropped.
		 */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Sum every rule's per-cpu packet/byte counters into counters[].
 * The xt_recseq seqcount retry loop ensures each (bcnt, pcnt) pair is
 * read consistently even while the packet path is updating it on that
 * CPU; the caller provides a zeroed counters[] array sized t->number.
 */
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* retry until the writer was not mid-update */
			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
/* Allocate a zeroed counter array sized to the table and fill it with
 * an atomic snapshot of the live counters.
 *
 * Returns the array (caller vfree()s it) or ERR_PTR(-ENOMEM).
 */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	const struct xt_table_info *private = table->private;
	struct xt_counters *counters;

	/* We need an atomic snapshot of the counters: the rest of the
	 * table doesn't change (other than comefrom, which userspace
	 * doesn't care about).
	 */
	counters = vzalloc(sizeof(struct xt_counters) * private->number);
	if (!counters)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);
	return counters;
}
/* Dump the whole ruleset to userspace: copy the raw blob, then patch
 * each entry's counters (with a consistent snapshot) and replace the
 * target name with the kernel's canonical target name.
 *
 * Returns 0 or -EFAULT.
 */
static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct xt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		/* overwrite the stale in-blob counters with the snapshot */
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target_c(e);
		/* report the resolved kernel target name, not the user's */
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
if (v > 0)
v += xt_compat_calc_jump(NFPROTO_ARP, v);
memcpy(dst, &v, sizeof(v));
}
/* Translate a native standard-target verdict to 32-bit layout.
 * Inverse of compat_standard_from_user(): positive jump offsets shrink
 * by the native->compat size delta.  Returns 0 or -EFAULT.
 */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t verdict = *(const int *)src;

	if (verdict > 0)
		verdict -= xt_compat_calc_jump(NFPROTO_ARP, verdict);
	return copy_to_user(dst, &verdict, sizeof(verdict)) ? -EFAULT : 0;
}
/* Compute how much smaller one native entry becomes in compat layout,
 * record that delta for later jump fixups, and shrink newinfo's size
 * and any hook/underflow offsets that lie beyond this entry.
 */
static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* fixed header shrinkage plus the target's own compat delta */
	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;
	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	/* remember per-entry delta so jumps can be rewritten later */
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	/* shift hook entry points / underflows that come after @e */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
/* Build a compat view of @info in @newinfo: same metadata but with
 * size and hook offsets adjusted to 32-bit entry layout.  Populates
 * the per-entry compat offset table as a side effect; the caller is
 * expected to hold xt_compat_lock and flush the offsets afterwards.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct arpt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(NFPROTO_ARP, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
/* ARPT_SO_GET_INFO handler: copy the named table's metadata (hook
 * offsets, entry count, total size) to userspace.  When @compat is set,
 * sizes/offsets are first converted to the 32-bit layout.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo)) {
		duprintf("length %u != %Zu\n", *len,
			 sizeof(struct arpt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* user-supplied name is not guaranteed to be terminated */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NFPROTO_ARP);
#endif
	/* auto-load "arptable_<name>" if the table isn't registered yet */
	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* NOTE(review): compat_table_info()'s return value
			 * is stored in ret but never checked before tmp is
			 * used below — on failure tmp may be partially
			 * initialized.  Confirm against upstream fixes.
			 */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}
/* ARPT_SO_GET_ENTRIES handler: dump the named table's ruleset into the
 * user buffer via copy_entries_to_user().
 */
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       const int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct arpt_get_entries) + get.size) {
		duprintf("get_entries: %u != %Zu\n", *len,
			 sizeof(struct arpt_get_entries) + get.size);
		return -EINVAL;
	}

	/* Fix: get.name comes straight from userspace and may lack NUL
	 * termination; terminate it before it is used as a C string by
	 * the table lookup below (prevents an out-of-bounds read).
	 */
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* table was replaced between GET_INFO and here */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
/* Swap @newinfo in as the named table's ruleset.  On success the old
 * ruleset is torn down (counters snapshotted and copied to userspace,
 * target modules released, memory freed) and ownership of @newinfo
 * passes to the table.  On failure the caller still owns @newinfo.
 */
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* The table module holds one extra self-reference while it has
	 * user rules; the two conditional puts below re-balance that
	 * reference for all old/new rule-count combinations.
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

put_module:
	module_put(t->me);
	xt_table_unlock(t);
free_newinfo_counters_untrans:
	vfree(counters);
out:
	return ret;
}
/* ARPT_SO_SET_REPLACE handler: copy the replacement blob from
 * userspace, translate/validate it, and atomically install it.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	/* Fix: a valid table always has at least the built-in policy
	 * entries, so zero counters is malformed input; rejecting it here
	 * also avoids a zero-sized vzalloc() in __do_replace() (mirrors
	 * the upstream hardening commit).
	 */
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("arp_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

free_newinfo_untrans:
	/* translate_table() succeeded, so entries hold module refs */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* ARPT_SO_SET_ADD_COUNTERS handler: add a userspace-supplied array of
 * per-rule counter deltas to the live table's counters.  @compat
 * selects the 32-bit header layout.
 */
static int do_add_counters(struct net *net, const void __user *user,
			   unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct arpt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		/* Fix: the name came straight from userspace and is not
		 * guaranteed to be NUL-terminated; terminate before using
		 * it as a C string in xt_find_table_lock().
		 */
		compat_tmp.name[sizeof(compat_tmp.name) - 1] = '\0';
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		tmp.name[sizeof(tmp.name) - 1] = '\0';	/* same fix, native */
		name = tmp.name;
	}

	/* total length must exactly match header + counter array */
	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	/* announce a writer to concurrent counter readers */
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);

unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
free:
	vfree(paddc);
	return ret;
}
#ifdef CONFIG_COMPAT
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
struct xt_entry_target *t;
t = compat_arpt_get_target(e);
module_put(t->u.kernel.target->me);
}
/* Compat counterpart of check_entry_size_and_hooks(): validate one
 * 32-bit entry, resolve its target (taking a module reference), record
 * the compat size delta for later translation, and note hook/underflow
 * placement.  On success the entry holds a module reference released
 * later by cleanup_entry() or compat_release_entry().
 */
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Alignment and bounds.  The third test is the fix: reject a
	 * next_offset pointing past the blob end, matching the native
	 * path; it is only evaluated once the full header is known to be
	 * inside the blob (short-circuit of the second test).
	 */
	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct arpt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* remember per-entry growth so jumps can be rewritten later */
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

release_target:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}
/* Expand one validated compat entry into native layout at *dstptr,
 * advancing *dstptr and growing *size by the layout delta, and shift
 * hook/underflow offsets that lie beyond this entry accordingly.
 * Sizes were validated earlier, so the copies below are in-bounds.
 */
static int
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct arpt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct arpt_entry *)*dstptr;
	/* copy the common head; offset fields are patched up below.
	 * NOTE(review): this reads sizeof(struct arpt_entry) bytes from a
	 * (smaller) compat entry — appears to rely on the source blob
	 * extending past the header; confirm against the validated sizes.
	 */
	memcpy(de, e, sizeof(struct arpt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct arpt_entry);
	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_arpt_get_target(e);
	target = t->u.kernel.target;
	/* converts target data and advances *dstptr / *size */
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
/* Translate a 32-bit userspace ruleset (*pentry0 / *pinfo) into native
 * layout.  Pass 1 (under xt_compat_lock): validate each compat entry,
 * resolve targets and accumulate size deltas.  Pass 2: copy entries
 * into a freshly allocated native table, then verify jump structure
 * and run target checks.  On success *pinfo/*pentry0 are replaced with
 * the native table and the compat originals are freed.
 *
 * NOTE(review): this two-pass scheme was substantially rewritten
 * upstream for robustness; treat its invariants with care.
 */
static int translate_compat_table(const char *name,
				  unsigned int valid_hooks,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  unsigned int total_size,
				  unsigned int number,
				  unsigned int *hook_entries,
				  unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_arpt_entry *iter0;
	struct arpt_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	/* j counts entries whose target module ref must be released on error */
	j = 0;
	xt_compat_lock(NFPROTO_ARP);
	xt_compat_init_offsets(NFPROTO_ARP, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* @size has grown to the native layout size during pass 1 */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	/* offset table and lock are only needed for pass 1 / the copy */
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	/* i counts entries that passed check_target() and need cleanup_entry() */
	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = check_target(iter1, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(arpt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	/* hand the native table back and free the compat original */
	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* release module refs taken for the first j compat entries */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	goto out;
}
/* 32-bit userspace layout of struct arpt_replace (SET_REPLACE header).
 * Field order and sizes are ABI — do not change.
 */
struct compat_arpt_replace {
	char				name[XT_TABLE_MAXNAMELEN];
	u32				valid_hooks;	/* bitmask of hooked chains */
	u32				num_entries;
	u32				size;		/* size of entries[] blob */
	u32				hook_entry[NF_ARP_NUMHOOKS];
	u32				underflow[NF_ARP_NUMHOOKS];
	u32				num_counters;
	compat_uptr_t			counters;	/* 32-bit user pointer */
	struct compat_arpt_entry	entries[0];	/* flexible trailing blob */
};
/* 32-bit SET_REPLACE handler: copy the compat blob from userspace,
 * translate it to native layout, and install it via __do_replace().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret;
	struct compat_arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	/* Fix: reject zero counters up front, matching do_replace(); a
	 * valid table always has at least its policy entries and a
	 * zero-sized counter allocation must not reach __do_replace().
	 */
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* on success this swaps newinfo/loc_cpu_entry for native versions */
	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* setsockopt() dispatcher for 32-bit callers; requires CAP_NET_ADMIN. */
static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
				  unsigned int len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		return compat_do_replace(sock_net(sk), user, len);
	case ARPT_SO_SET_ADD_COUNTERS:
		/* counter addition shares the native handler (compat=1) */
		return do_add_counters(sock_net(sk), user, len, 1);
	default:
		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
		return -EINVAL;
	}
}
/* Shrink one native entry into compat layout directly in the user
 * buffer at *dstptr, substituting counters[i] for the live counters
 * and fixing up target/next offsets for the size delta.
 */
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
				     compat_uint_t *size,
				     struct xt_counters *counters,
				     unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_arpt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	origsize = *size;
	ce = (struct compat_arpt_entry __user *)*dstptr;
	/* copy the head, then overwrite counters with the snapshot */
	if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_arpt_entry);
	*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	target_offset = e->target_offset - (origsize - *size);

	t = arpt_get_target(e);
	/* converts the target data and advances *dstptr / *size */
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* patch the offset fields written by the head copy above */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
/* Dump the ruleset to a 32-bit caller: snapshot the counters, then
 * convert each entry to compat layout straight into the user buffer.
 */
static int compat_copy_entries_to_user(unsigned int total_size,
				       struct xt_table *table,
				       void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;
	unsigned int i = 0;
	struct arpt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy on our node/cpu */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}
	vfree(counters);
	return ret;
}
/* 32-bit userspace layout of struct arpt_get_entries (GET_ENTRIES
 * request/reply header).  Field order and sizes are ABI.
 */
struct compat_arpt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;			/* expected blob size */
	struct compat_arpt_entry entrytable[0];	/* flexible reply area */
};
/* 32-bit GET_ENTRIES handler: dump the named table's ruleset in compat
 * layout via compat_copy_entries_to_user().
 */
static int compat_get_entries(struct net *net,
			      struct compat_arpt_get_entries __user *uptr,
			      int *len)
{
	int ret;
	struct compat_arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	/* Fix: get.name comes straight from userspace and may lack NUL
	 * termination; terminate it before it is used as a C string by
	 * the table lookup below (matches the native get_entries()).
	 */
	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(NFPROTO_ARP);
	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* table changed between GET_INFO and here */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(NFPROTO_ARP);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(NFPROTO_ARP);
	return ret;
}
static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);
/* getsockopt() dispatcher for 32-bit callers; requires CAP_NET_ADMIN.
 * Commands without a compat-specific path fall through to the native
 * do_arpt_get_ctl().
 */
static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
				  int *len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		return get_info(sock_net(sk), user, len, 1);
	case ARPT_SO_GET_ENTRIES:
		return compat_get_entries(sock_net(sk), user, len);
	default:
		return do_arpt_get_ctl(sk, cmd, user, len);
	}
}
#endif
/* Native setsockopt() dispatcher; requires CAP_NET_ADMIN. */
static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		return do_replace(sock_net(sk), user, len);
	case ARPT_SO_SET_ADD_COUNTERS:
		return do_add_counters(sock_net(sk), user, len, 0);
	default:
		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
		return -EINVAL;
	}
}
/* Native getsockopt() dispatcher: table info, entry dump, and target
 * revision queries.  Requires CAP_NET_ADMIN.
 */
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case ARPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* user-supplied name may lack NUL termination */
		rev.name[sizeof(rev.name)-1] = 0;

		/* auto-load "arpt_<name>" if the target isn't registered */
		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/* Register a new arp table for @net from the template ruleset @repl.
 * Allocates and translates a private copy of the ruleset, then hands
 * it to xt_register_table().  Returns the registered table or an
 * ERR_PTR on failure.
 */
struct xt_table *arpt_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct arpt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* bootstrap holds dummy "old" info for the initial replace */
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(newinfo, loc_cpu_entry, repl);
	duprintf("arpt_register_table: translate table gives %d\n", ret);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
/* Unregister @table and tear down its ruleset: run each entry's target
 * destructor (dropping target module refs), rebalance the table
 * module's self-reference taken for user-added rules, and free the
 * table info.
 */
void arpt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct arpt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter);
	/* drop the extra self-ref held while user rules were present */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* The built-in targets: standard (NULL) and error.
 * STANDARD has no ->target function: its verdict is interpreted
 * directly by the table walker.  ERROR marks user-defined chain heads
 * and carries the chain name in its (XT_FUNCTION_MAXNAMELEN) payload.
 */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),	/* the verdict */
		.family           = NFPROTO_ARP,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = arpt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_ARP,
	},
};
/* Socket-option hook registration: routes the ARPT_SO_* get/setsockopt
 * range on PF_INET sockets to the dispatchers above (with separate
 * entry points for 32-bit callers on 64-bit kernels).
 */
static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_arpt_set_ctl,
#endif
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_arpt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
/* Per-netns init: set up xtables state for the ARP protocol family. */
static int __net_init arp_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_ARP);
}
/* Per-netns teardown: release xtables state for the ARP family. */
static void __net_exit arp_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_ARP);
}
/* Network-namespace lifecycle callbacks for this subsystem. */
static struct pernet_operations arp_tables_net_ops = {
	.init = arp_tables_net_init,
	.exit = arp_tables_net_exit,
};
/* Module init: register per-netns state, the built-in targets, and the
 * sockopt interface, unwinding in reverse order on failure.  (The
 * err4/err2/err1 label names are historical — err3 was removed.)
 */
static int __init arp_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arp_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	if (ret < 0)
		goto err2;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err4;

	printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n");
	return 0;

err4:
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
	unregister_pernet_subsys(&arp_tables_net_ops);
err1:
	return ret;
}
/* Module teardown: undo arp_tables_init() registrations in reverse order. */
static void __exit arp_tables_fini(void)
{
nf_unregister_sockopt(&arpt_sockopts);
xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
unregister_pernet_subsys(&arp_tables_net_ops);
}
/* Exported for per-protocol table modules (e.g. arptable_filter). */
EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);
module_init(arp_tables_init);
module_exit(arp_tables_fini);
| gpl-2.0 |
AlexTheBest/ACore | src/server/scripts/EasternKingdoms/Scholomance/boss_illucia_barov.cpp | 12 | 3529 | /*
* Copyright (C) 2008-2010 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: Boss_Illucia_Barov
SD%Complete: 100
SDComment:
SDCategory: Scholomance
EndScriptData */
#include "ScriptPCH.h"
#include "scholomance.h"
#define SPELL_CURSEOFAGONY 18671
#define SPELL_SHADOWSHOCK 20603
#define SPELL_SILENCE 15487
#define SPELL_FEAR 6215
// Scholomance boss script: Lady Illucia Barov.
// The AI casts four shadow-school spells on independent countdown timers
// and reports its death to the instance script.
class boss_illucia_barov : public CreatureScript
{
public:
boss_illucia_barov() : CreatureScript("boss_illucia_barov") { }
// Factory: the core calls this to attach the AI to the spawned creature.
CreatureAI* GetAI(Creature* pCreature) const
{
return new boss_illuciabarovAI (pCreature);
}
struct boss_illuciabarovAI : public ScriptedAI
{
boss_illuciabarovAI(Creature *c) : ScriptedAI(c) {}
// Milliseconds remaining until each spell's next cast.
uint32 CurseOfAgony_Timer;
uint32 ShadowShock_Timer;
uint32 Silence_Timer;
uint32 Fear_Timer;
// Re-arm initial cast delays on spawn/evade.
void Reset()
{
CurseOfAgony_Timer = 18000;
ShadowShock_Timer = 9000;
Silence_Timer = 5000;
Fear_Timer = 30000;
}
// Notify the instance script and, if the Gandling event is active,
// summon a follow-up creature.
void JustDied(Unit * /*killer*/)
{
InstanceScript *pInstance = me->GetInstanceScript();
if (pInstance)
{
pInstance->SetData(DATA_LADYILLUCIABAROV_DEATH, 0);
// NOTE(review): entry 1853 at fixed coordinates, despawning when
// killed -- presumably a Gandling-event add; confirm against the DB.
if (pInstance->GetData(TYPE_GANDLING) == IN_PROGRESS)
me->SummonCreature(1853, 180.73f, -9.43856f, 75.507f, 1.61399f, TEMPSUMMON_DEAD_DESPAWN, 0);
}
}
// No special combat-entry behavior.
void EnterCombat(Unit * /*who*/)
{
}
// Per-tick update: each timer is decremented by the elapsed diff and
// fires (then re-arms) when it runs out. Shadow Shock picks a random
// target; the other three spells hit the current victim.
void UpdateAI(const uint32 diff)
{
if (!UpdateVictim())
return;
//CurseOfAgony_Timer
if (CurseOfAgony_Timer <= diff)
{
DoCast(me->getVictim(), SPELL_CURSEOFAGONY);
CurseOfAgony_Timer = 30000;
} else CurseOfAgony_Timer -= diff;
//ShadowShock_Timer: random raid target rather than the tank
if (ShadowShock_Timer <= diff)
{
Unit *pTarget = NULL;
pTarget = SelectUnit(SELECT_TARGET_RANDOM,0);
if (pTarget) DoCast(pTarget, SPELL_SHADOWSHOCK);
ShadowShock_Timer = 12000;
} else ShadowShock_Timer -= diff;
//Silence_Timer
if (Silence_Timer <= diff)
{
DoCast(me->getVictim(), SPELL_SILENCE);
Silence_Timer = 14000;
} else Silence_Timer -= diff;
//Fear_Timer
if (Fear_Timer <= diff)
{
DoCast(me->getVictim(), SPELL_FEAR);
Fear_Timer = 30000;
} else Fear_Timer -= diff;
DoMeleeAttackIfReady();
}
};
};
// Script-loader hook: constructing the CreatureScript registers it with
// the script manager (the manager owns and frees the object).
void AddSC_boss_illuciabarov()
{
new boss_illucia_barov();
}
| gpl-2.0 |
tkpb/Telegram | TMessagesProj/jni/third_party/libvpx/source/libvpx/vp8/encoder/x86/vp8_quantize_ssse3.c | 12 | 3274 | /*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <tmmintrin.h> /* SSSE3 */
#include "./vp8_rtcd.h"
#include "vp8/encoder/block.h"
/* bitscan reverse (bsr) */
/*
 * bsr() returns the 1-based index of the most significant set bit of
 * @mask, or 0 when mask == 0. The quantizer uses it to turn the
 * zig-zag-ordered nonzero-coefficient bitmask into the end-of-block
 * (eob) position.
 */
#if defined(_MSC_VER)
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
static int bsr(int mask) {
unsigned long eob;
/* eob is undefined when mask == 0; the explicit check below fixes that up. */
_BitScanReverse(&eob, mask);
eob++;
if (mask == 0) eob = 0;
return eob;
}
#else
static int bsr(int mask) {
int eob;
#if defined(__GNUC__) && __GNUC__
__asm__ __volatile__("bsr %1, %0" : "=r"(eob) : "r"(mask) : "flags");
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
asm volatile("bsr %1, %0" : "=r"(eob) : "r"(mask) : "flags");
#endif
/* x86 BSR leaves the destination undefined for a zero input, hence the mask == 0 fixup. */
eob++;
if (mask == 0) eob = 0;
return eob;
}
#endif
/*
 * vp8_fast_quantize_b_ssse3 - SSSE3 fast-path quantizer for one 4x4 block.
 *
 * For the 16 coefficients in b->coeff it computes
 *   qcoeff  = sign(z) * ((|z| + round) * quant_fast >> 16)
 *   dqcoeff = qcoeff * dequant
 * and sets *d->eob to the 1-based position of the last nonzero quantized
 * coefficient in zig-zag scan order (0 if all quantize to zero).
 */
void vp8_fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d) {
int eob, mask;
__m128i z0 = _mm_load_si128((__m128i *)(b->coeff));
__m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));
__m128i round0 = _mm_load_si128((__m128i *)(b->round));
__m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));
__m128i quant_fast0 = _mm_load_si128((__m128i *)(b->quant_fast));
__m128i quant_fast1 = _mm_load_si128((__m128i *)(b->quant_fast + 8));
__m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));
__m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));
__m128i sz0, sz1, x, x0, x1, y0, y1, zeros, abs0, abs1;
/* pshufb control mapping raster byte order to zig-zag scan order */
DECLARE_ALIGNED(16, const uint8_t,
pshufb_zig_zag_mask[16]) = { 0, 1, 4, 8, 5, 2, 3, 6,
9, 12, 13, 10, 7, 11, 14, 15 };
__m128i zig_zag = _mm_load_si128((const __m128i *)pshufb_zig_zag_mask);
/* sign of z: z >> 15 */
sz0 = _mm_srai_epi16(z0, 15);
sz1 = _mm_srai_epi16(z1, 15);
/* x = abs(z) */
x0 = _mm_abs_epi16(z0);
x1 = _mm_abs_epi16(z1);
/* x += round */
x0 = _mm_add_epi16(x0, round0);
x1 = _mm_add_epi16(x1, round1);
/* y = (x * quant) >> 16 */
y0 = _mm_mulhi_epi16(x0, quant_fast0);
y1 = _mm_mulhi_epi16(x1, quant_fast1);
/* ASM saves Y for EOB */
/* I think we can ignore that because adding the sign doesn't change anything
* and multiplying 0 by dequant is OK as well */
abs0 = y0;
abs1 = y1;
/* Restore the sign: (y ^ sz) - sz negates y exactly where z was negative. */
y0 = _mm_xor_si128(y0, sz0);
y1 = _mm_xor_si128(y1, sz1);
x0 = _mm_sub_epi16(y0, sz0);
x1 = _mm_sub_epi16(y1, sz1);
/* qcoeff = x */
_mm_store_si128((__m128i *)(d->qcoeff), x0);
_mm_store_si128((__m128i *)(d->qcoeff + 8), x1);
/* x * dequant */
x0 = _mm_mullo_epi16(x0, dequant0);
x1 = _mm_mullo_epi16(x1, dequant1);
/* dqcoeff = x * dequant */
_mm_store_si128((__m128i *)(d->dqcoeff), x0);
_mm_store_si128((__m128i *)(d->dqcoeff + 8), x1);
/* Build a per-coefficient nonzero mask, shuffle it into zig-zag order,
* and use the highest set bit as the end-of-block position. */
zeros = _mm_setzero_si128();
x0 = _mm_cmpgt_epi16(abs0, zeros);
x1 = _mm_cmpgt_epi16(abs1, zeros);
x = _mm_packs_epi16(x0, x1);
x = _mm_shuffle_epi8(x, zig_zag);
mask = _mm_movemask_epi8(x);
eob = bsr(mask);
*d->eob = 0xFF & eob;
}
| gpl-2.0 |
jawad6233/Lenovo_A820_kernel_kk | mediatek/kernel/drivers/auxadc/mt_auxadc.c | 12 | 38050 | /*****************************************************************************
*
* Filename:
* ---------
* mt_auxadc.c
*
* Project:
* --------
* Android_Software
*
* Description:
* ------------
* This Module defines functions of AUXADC common code
*
* Author:
* -------
* Zhong Wang
*
****************************************************************************/
#include <linux/init.h> /* For init/exit macros */
#include <linux/module.h> /* For MODULE_ marcros */
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mach/mt_gpt.h>
#include <mach/mt_clkmgr.h>
#include <mach/sync_write.h>
#include <cust_adc.h> // generate by DCT Tool
#include "mt_auxadc.h"
#include <mt_auxadc_sw.h>
/*****************************************************************************
* Integrate with NVRAM
****************************************************************************/
/* Character-device name and ioctl command numbers for the cali interface. */
#define AUXADC_CALI_DEVNAME "mtk-adc-cali"
#define TEST_ADC_CALI_PRINT _IO('k', 0)
#define SET_ADC_CALI_Slop _IOW('k', 1, int)
#define SET_ADC_CALI_Offset _IOW('k', 2, int)
#define SET_ADC_CALI_Cal _IOW('k', 3, int)
#define ADC_CHANNEL_READ _IOW('k', 4, int)
/* Per-channel name/number mapping filled in at probe time (table used by
 * IMM_get_adc_channel_num); reserve fields are unused padding. */
typedef struct adc_info
{
char channel_name[64];
int channel_number;
int reserve1;
int reserve2;
int reserve3;
}ADC_INFO;
static ADC_INFO g_adc_info[ADC_CHANNEL_MAX];
/* Calibration tables uploaded from NVRAM via ioctl (slope per channel,
 * offset per channel) and the flag saying whether they are valid. */
static int auxadc_cali_slop[ADC_CHANNEL_MAX] = {0};
static int auxadc_cali_offset[ADC_CHANNEL_MAX] = {0};
static kal_bool g_AUXADC_Cali = KAL_FALSE;
static int auxadc_cali_cal[1] = {0};
/* Scratch buffers for the (now removed) ADC_CHANNEL_READ ioctl. */
static int auxadc_in_data[2] = {1,1};
static int auxadc_out_data[2] = {1,1};
/* Serializes all ioctl command handling. */
static DEFINE_MUTEX(auxadc_mutex);
static dev_t auxadc_cali_devno;
static int auxadc_cali_major = 0;
static struct cdev *auxadc_cali_cdev;
static struct class *auxadc_cali_class = NULL;
/* Debug polling thread controlled via the AUXADC_channel sysfs store. */
static struct task_struct *thread = NULL;
static int g_start_debug_thread =0;
static int g_adc_init_flag =0;
///////////////////////////////////////////////////////////////////////////////////////////
//// fop Common API
///////////////////////////////////////////////////////////////////////////////////////////
/* Return nonzero once driver init has set g_adc_init_flag (safe-to-use probe). */
int IMM_IsAdcInitReady(void)
{
return g_adc_init_flag;
}
/*
 * Look up a channel number by name.
 *
 * Compares the first @len characters of @channel_name against each entry
 * in g_adc_info and returns the matching channel number, or -1 when no
 * entry matches. NOTE: the bounded compare means a short @len can match
 * a longer stored name that shares the prefix.
 */
int IMM_get_adc_channel_num(char *channel_name, int len)
{
	unsigned int idx;

	printk("[ADC] name = %s\n", channel_name);
	printk("[ADC] name_len = %d\n", len);

	for (idx = 0; idx < ADC_CHANNEL_MAX; idx++) {
		if (strncmp(channel_name, g_adc_info[idx].channel_name, len) == 0)
			return g_adc_info[idx].channel_number;
	}

	printk("[ADC] find channel number failed\n");
	return -1;
}
/* Thin public wrapper around IMM_auxadc_GetOneChannelValue(). */
int IMM_GetOneChannelValue(int dwChannel, int data[4], int* rawdata)
{
return IMM_auxadc_GetOneChannelValue(dwChannel, data, rawdata);
}
// 1 V == 1000000 uV
// Thin public wrapper: reads one channel and returns the calibrated
// voltage through *voltage, in microvolts.
int IMM_GetOneChannelValue_Cali(int Channel, int*voltage)
{
return IMM_auxadc_GetOneChannelValue_Cali(Channel, voltage);
}
///////////////////////////////////////////////////////////////////////////////////////////
//// fop API
///////////////////////////////////////////////////////////////////////////////////////////
static long auxadc_cali_unlocked_ioctl(struct file *file, unsigned int cmd,unsigned long arg)
{
int i = 0, ret = 0;
int *user_data_addr;
int *nvram_data_addr;
mutex_lock(&auxadc_mutex);
switch(cmd)
{
case TEST_ADC_CALI_PRINT :
g_AUXADC_Cali = KAL_FALSE;
break;
case SET_ADC_CALI_Slop:
nvram_data_addr = (int *)arg;
ret = copy_from_user(auxadc_cali_slop, nvram_data_addr, 36);
g_AUXADC_Cali = KAL_FALSE;
/* Protection */
for (i = 0; i < ADC_CHANNEL_MAX; i++)
{
if ((*(auxadc_cali_slop + i) == 0) || (*(auxadc_cali_slop + i) == 1)) {
*(auxadc_cali_slop + i) = 1000;
}
}
for (i = 0; i < ADC_CHANNEL_MAX; i++) printk("auxadc_cali_slop[%d] = %d\n", i, *(auxadc_cali_slop+i));
printk("**** MT auxadc_cali ioctl : SET_ADC_CALI_Slop Done!\n");
break;
case SET_ADC_CALI_Offset:
nvram_data_addr = (int *)arg;
ret = copy_from_user(auxadc_cali_offset, nvram_data_addr, 36);
g_AUXADC_Cali = KAL_FALSE;
for (i = 0; i < ADC_CHANNEL_MAX; i++) printk("auxadc_cali_offset[%d] = %d\n", i, *(auxadc_cali_offset+i));
printk("**** MT auxadc_cali ioctl : SET_ADC_CALI_Offset Done!\n");
break;
case SET_ADC_CALI_Cal :
nvram_data_addr = (int *)arg;
ret = copy_from_user(auxadc_cali_cal, nvram_data_addr, 4);
g_AUXADC_Cali = KAL_TRUE; /* enable calibration after setting AUXADC_CALI_Cal */
if (auxadc_cali_cal[0] == 1) {
g_AUXADC_Cali = KAL_TRUE;
} else {
g_AUXADC_Cali = KAL_FALSE;
}
for (i = 0; i < 1; i++) printk("auxadc_cali_cal[%d] = %d\n", i, *(auxadc_cali_cal + i));
printk("**** MT auxadc_cali ioctl : SET_ADC_CALI_Cal Done!\n");
break;
case ADC_CHANNEL_READ:
g_AUXADC_Cali = KAL_FALSE; /* 20100508 Infinity */
user_data_addr = (int *)arg;
ret = copy_from_user(auxadc_in_data, user_data_addr, 8); /* 2*int = 2*4 */
printk("this api is removed !! \n");
ret = copy_to_user(user_data_addr, auxadc_out_data, 8);
printk("**** ioctl : AUXADC Channel %d * %d times = %d\n", auxadc_in_data[0], auxadc_in_data[1], auxadc_out_data[0]);
break;
default:
g_AUXADC_Cali = KAL_FALSE;
break;
}
mutex_unlock(&auxadc_mutex);
return 0;
}
/* open(): no per-open state to set up. */
static int auxadc_cali_open(struct inode *inode, struct file *file)
{
return 0;
}
/* release(): nothing to tear down. */
static int auxadc_cali_release(struct inode *inode, struct file *file)
{
return 0;
}
/* File operations for the mtk-adc-cali character device. */
static struct file_operations auxadc_cali_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = auxadc_cali_unlocked_ioctl,
.open = auxadc_cali_open,
.release = auxadc_cali_release,
};
///////////////////////////////////////////////////////////////////////////////////////////
//// Create File For EM : AUXADC_Channel_X_Slope/Offset
///////////////////////////////////////////////////////////////////////////////////////////
/*
 * Generate the per-channel EM sysfs attribute quadruple: read-only
 * show handlers for the channel's calibration Slope and Offset, the
 * corresponding no-op store handlers, and the two DEVICE_ATTR
 * definitions.
 *
 * This replaces 16 hand-written copies of the same ~26-line pattern.
 * Token pasting keeps every generated symbol name and every format
 * string byte-identical to the old copy-pasted versions (e.g.
 * show_AUXADC_Channel_0_Slope, dev_attr_AUXADC_Channel_0_Slope), so
 * the device_create_file() users elsewhere in this driver are
 * unaffected.
 */
#define AUXADC_CHANNEL_EM_ATTRS(_ch)								\
static ssize_t show_AUXADC_Channel_##_ch##_Slope(struct device *dev,			\
		struct device_attribute *attr, char *buf)				\
{											\
	int ret_value = (*(auxadc_cali_slop + _ch));					\
	printk("[EM] AUXADC_Channel_" #_ch "_Slope : %d\n", ret_value);			\
	return sprintf(buf, "%u\n", ret_value);						\
}											\
static ssize_t store_AUXADC_Channel_##_ch##_Slope(struct device *dev,			\
		struct device_attribute *attr, const char *buf, size_t size)		\
{											\
	printk("[EM] Not Support Write Function\n");					\
	return size;									\
}											\
static DEVICE_ATTR(AUXADC_Channel_##_ch##_Slope, 0664,					\
		show_AUXADC_Channel_##_ch##_Slope,					\
		store_AUXADC_Channel_##_ch##_Slope);					\
static ssize_t show_AUXADC_Channel_##_ch##_Offset(struct device *dev,			\
		struct device_attribute *attr, char *buf)				\
{											\
	int ret_value = (*(auxadc_cali_offset + _ch));					\
	printk("[EM] AUXADC_Channel_" #_ch "_Offset : %d\n", ret_value);		\
	return sprintf(buf, "%u\n", ret_value);						\
}											\
static ssize_t store_AUXADC_Channel_##_ch##_Offset(struct device *dev,			\
		struct device_attribute *attr, const char *buf, size_t size)		\
{											\
	printk("[EM] Not Support Write Function\n");					\
	return size;									\
}											\
static DEVICE_ATTR(AUXADC_Channel_##_ch##_Offset, 0664,					\
		show_AUXADC_Channel_##_ch##_Offset,					\
		store_AUXADC_Channel_##_ch##_Offset);

/* Instantiate only the channels the platform actually provides. */
#if ADC_CHANNEL_MAX > 0
AUXADC_CHANNEL_EM_ATTRS(0)
#endif
#if ADC_CHANNEL_MAX > 1
AUXADC_CHANNEL_EM_ATTRS(1)
#endif
#if ADC_CHANNEL_MAX > 2
AUXADC_CHANNEL_EM_ATTRS(2)
#endif
#if ADC_CHANNEL_MAX > 3
AUXADC_CHANNEL_EM_ATTRS(3)
#endif
#if ADC_CHANNEL_MAX > 4
AUXADC_CHANNEL_EM_ATTRS(4)
#endif
#if ADC_CHANNEL_MAX > 5
AUXADC_CHANNEL_EM_ATTRS(5)
#endif
#if ADC_CHANNEL_MAX > 6
AUXADC_CHANNEL_EM_ATTRS(6)
#endif
#if ADC_CHANNEL_MAX > 7
AUXADC_CHANNEL_EM_ATTRS(7)
#endif
#if ADC_CHANNEL_MAX > 8
AUXADC_CHANNEL_EM_ATTRS(8)
#endif
#if ADC_CHANNEL_MAX > 9
AUXADC_CHANNEL_EM_ATTRS(9)
#endif
#if ADC_CHANNEL_MAX > 10
AUXADC_CHANNEL_EM_ATTRS(10)
#endif
#if ADC_CHANNEL_MAX > 11
AUXADC_CHANNEL_EM_ATTRS(11)
#endif
#if ADC_CHANNEL_MAX > 12
AUXADC_CHANNEL_EM_ATTRS(12)
#endif
#if ADC_CHANNEL_MAX > 13
AUXADC_CHANNEL_EM_ATTRS(13)
#endif
#if ADC_CHANNEL_MAX > 14
AUXADC_CHANNEL_EM_ATTRS(14)
#endif
#if ADC_CHANNEL_MAX > 15
AUXADC_CHANNEL_EM_ATTRS(15)
#endif
///////////////////////////////////////////////////////////////////////////////////////////
//// Create File For EM : AUXADC_Channel_Is_Calibration
///////////////////////////////////////////////////////////////////////////////////////////
/* EM node: report whether NVRAM calibration data is active (g_AUXADC_Cali). */
static ssize_t show_AUXADC_Channel_Is_Calibration(struct device *dev,struct device_attribute *attr, char *buf)
{
int ret_value = 2;
ret_value = g_AUXADC_Cali;
printk("[EM] AUXADC_Channel_Is_Calibration : %d\n", ret_value);
return sprintf(buf, "%u\n", ret_value);
}
/* Write is not supported; accept and discard the data. */
static ssize_t store_AUXADC_Channel_Is_Calibration(struct device *dev,struct device_attribute *attr, const char *buf, size_t size)
{
printk("[EM] Not Support Write Function\n");
return size;
}
static DEVICE_ATTR(AUXADC_Channel_Is_Calibration, 0664, show_AUXADC_Channel_Is_Calibration, store_AUXADC_Channel_Is_Calibration);
static ssize_t show_AUXADC_register(struct device *dev,struct device_attribute *attr, char *buf)
{
return mt_auxadc_dump_register(buf);
}
/*
 * store_AUXADC_register - sysfs write hook; register writes are not
 * supported, so the input is discarded.
 */
static ssize_t store_AUXADC_register(struct device *dev,struct device_attribute *attr, const char *buf, size_t size)
{
	printk("[EM] Not Support store_AUXADC_register\n");
	return size; /* claim the whole write so userspace does not retry */
}
static DEVICE_ATTR(AUXADC_register, 0664, show_AUXADC_register, store_AUXADC_register);
/*
 * show_AUXADC_chanel - sysfs read hook: print the scaled reading of the
 * first few ADC channels followed by a register dump into @buf.
 * Returns the total number of bytes placed in @buf.
 */
static ssize_t show_AUXADC_chanel(struct device *dev,struct device_attribute *attr, char *buf)
{
	int i;
	int data[4] = {0, 0, 0, 0};
	char buf_temp[960];
	int res;

	/*
	 * NOTE(review): the channel count 5 is hard-coded here while
	 * dbug_thread() iterates ADC_CHANNEL_MAX - confirm which is intended.
	 */
	for (i = 0; i < 5; i++)
	{
		res = IMM_auxadc_GetOneChannelValue(i, data, NULL);
		if (res < 0)
		{
			printk("[adc_driver]: get data error\n");
			break;
		}
		printk("[adc_driver]: channel[%d]=%d.%d \n", i, data[0], data[1]);
		/* Fix: bound the write so a bad reading cannot overflow buf_temp. */
		snprintf(buf_temp, sizeof(buf_temp), "channel[%d]=%d.%d \n", i, data[0], data[1]);
		strcat(buf, buf_temp);
	}
	/*
	 * NOTE(review): assumes mt_auxadc_dump_register() writes less than
	 * sizeof(buf_temp) bytes - confirm against its implementation.
	 */
	mt_auxadc_dump_register(buf_temp);
	strcat(buf, buf_temp);
	return strlen(buf);
}
/*
 * dbug_thread - body of the debug kernel thread started from
 * store_AUXADC_channel().  While g_start_debug_thread is set it walks all
 * ADC_CHANNEL_MAX channels, logging the raw value, the scaled value and
 * the calibrated voltage of each, pausing 500 ms between channels and
 * between sweeps.  Always returns 0 when the flag is cleared.
 */
static int dbug_thread(void *unused)
{
	int i = 0, data[4] = {0,0,0,0};
	int res =0;
	int rawdata=0;
	int cali_voltage =0;
	while(g_start_debug_thread)
	{
		for (i = 0; i < ADC_CHANNEL_MAX; i++)
		{
			res = IMM_auxadc_GetOneChannelValue(i,data,&rawdata);
			if(res < 0)
			{
				printk("[adc_driver]: get data error\n");
				break; /* abort this sweep; retry after the outer sleep */
			}
			else
			{
				printk("[adc_driver]: channel[%d]raw =%d\n",i,rawdata);
				printk("[adc_driver]: channel[%d]=%d.%.02d \n",i,data[0],data[1]);
			}
			res = IMM_auxadc_GetOneChannelValue_Cali(i,&cali_voltage );
			if(res < 0)
			{
				printk("[adc_driver]: get cali voltage error\n");
				break;
			}
			else
			{
				printk("[adc_driver]: channel[%d] cali_voltage =%d\n",i,cali_voltage);
			}
			msleep(500); /* pace: one channel every 500 ms */
		}
		msleep(500); /* pause between full sweeps */
	}
	return 0;
}
/*
 * store_AUXADC_channel - sysfs write hook controlling the debug thread.
 * Writing "1" starts dbug_thread(); writing anything else clears the run
 * flag so a running thread exits on its next loop iteration.
 */
static ssize_t store_AUXADC_channel(struct device *dev,struct device_attribute *attr, const char *buf, size_t size)
{
	unsigned int start_flag;
	int error;

	if (sscanf(buf, "%u", &start_flag) != 1) {
		printk("[adc_driver]: Invalid values\n");
		return -EINVAL;
	}
	printk("[adc_driver] start flag =%d \n",start_flag);
	g_start_debug_thread = start_flag;
	if (1 == start_flag)
	{
		thread = kthread_run(dbug_thread, 0, "AUXADC");
		if (IS_ERR(thread))
		{
			error = PTR_ERR(thread);
			printk( "[adc_driver] failed to create kernel thread: %d\n", error);
			/*
			 * Fix: no thread was started, so do not leave the run
			 * flag set - a later stop/start cycle would otherwise
			 * believe a thread is already running.
			 */
			g_start_debug_thread = 0;
		}
	}
	return size;
}
/*
 * mt_auxadc_create_device_attr - register every EM sysfs attribute for the
 * AUXADC platform device.  One slope/offset pair is registered per channel
 * compiled in via ADC_CHANNEL_MAX.
 * Returns 0 on success or the (non-zero) device_create_file() error code.
 */
static int mt_auxadc_create_device_attr(struct platform_device *dev)
{
	int ret = 0;
	/* For EM */
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_register)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_read_channel)) != 0) goto exit;
#if ADC_CHANNEL_MAX>0
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_0_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_0_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>1
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_1_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_1_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>2
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_2_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_2_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>3
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_3_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_3_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>4
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_4_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_4_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>5
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_5_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_5_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>6
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_6_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_6_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>7
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_7_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_7_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>8
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_8_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_8_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>9
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_9_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_9_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>10
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_10_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_10_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>11
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_11_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_11_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>12
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_12_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_12_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>13
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_13_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_13_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>14
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_14_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_14_Offset)) != 0) goto exit;
#endif
#if ADC_CHANNEL_MAX>15
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_15_Slope)) != 0) goto exit;
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_15_Offset)) != 0) goto exit;
#endif
	if ((ret = device_create_file(&(dev->dev), &dev_attr_AUXADC_Channel_Is_Calibration)) != 0) goto exit;
	return 0;
exit:
	/* Fix: propagate the real error code instead of the constant 1. */
	return ret;
}
/*
 * adc_channel_info_init - build the name -> hardware-channel lookup table.
 * One g_adc_info slot is filled per channel enabled in the board config;
 * used_channel_counter only advances past compiled-in entries.
 * Always returns 0.
 *
 * NOTE(review): sprintf assumes channel_name can hold the longest name
 * ("ADC_FDD_Rf_Params_Dynamic_Custom", 33 bytes with NUL) - confirm the
 * field size in the g_adc_info declaration.
 */
static int adc_channel_info_init(void)
{
	unsigned int used_channel_counter = 0;
	used_channel_counter = 0;
#ifdef AUXADC_TEMPERATURE_CHANNEL
	//ap_domain &= ~(1<<CUST_ADC_MD_CHANNEL);
	sprintf(g_adc_info[used_channel_counter].channel_name, "ADC_RFTMP");
	g_adc_info[used_channel_counter].channel_number = AUXADC_TEMPERATURE_CHANNEL;
	printk("[ADC] channel_name = %s channel num=%d\n", g_adc_info[used_channel_counter].channel_name
	,g_adc_info[used_channel_counter].channel_number);
	used_channel_counter++;
#endif
#ifdef AUXADC_ADC_FDD_RF_PARAMS_DYNAMIC_CUSTOM_CH_CHANNEL
	sprintf(g_adc_info[used_channel_counter].channel_name, "ADC_FDD_Rf_Params_Dynamic_Custom");
	g_adc_info[used_channel_counter].channel_number = AUXADC_ADC_FDD_RF_PARAMS_DYNAMIC_CUSTOM_CH_CHANNEL;
	printk("[ADC] channel_name = %s channel num=%d\n", g_adc_info[used_channel_counter].channel_name
	,g_adc_info[used_channel_counter].channel_number);
	used_channel_counter++;
#endif
#ifdef AUXADC_HF_MIC_CHANNEL
	sprintf(g_adc_info[used_channel_counter].channel_name, "ADC_MIC");
	g_adc_info[used_channel_counter].channel_number = AUXADC_HF_MIC_CHANNEL;
	printk("[ADC] channel_name = %s channel num=%d\n", g_adc_info[used_channel_counter].channel_name
	,g_adc_info[used_channel_counter].channel_number);
	used_channel_counter++;
#endif
	return 0;
}
// platform_driver API
static int mt_auxadc_probe(struct platform_device *dev)
{
int ret = 0;
struct class_device *class_dev = NULL;
printk("******** MT AUXADC driver probe!! ********\n");
adc_channel_info_init();
if(enable_clock(MT_PDN_PERI_AUXADC,"AUXADC"))
printk("hwEnableClock AUXADC failed.");
/* Integrate with NVRAM */
ret = alloc_chrdev_region(&auxadc_cali_devno, 0, 1, AUXADC_CALI_DEVNAME);
if (ret)
printk("Error: Can't Get Major number for auxadc_cali\n");
auxadc_cali_cdev = cdev_alloc();
auxadc_cali_cdev->owner = THIS_MODULE;
auxadc_cali_cdev->ops = &auxadc_cali_fops;
ret = cdev_add(auxadc_cali_cdev, auxadc_cali_devno, 1);
if(ret)
printk("auxadc_cali Error: cdev_add\n");
auxadc_cali_major = MAJOR(auxadc_cali_devno);
auxadc_cali_class = class_create(THIS_MODULE, AUXADC_CALI_DEVNAME);
class_dev = (struct class_device *)device_create(auxadc_cali_class,
NULL, auxadc_cali_devno, NULL, AUXADC_CALI_DEVNAME);
printk("[MT AUXADC_probe] NVRAM prepare : done !!\n");
if(mt_auxadc_create_device_attr(dev))
goto exit;
g_adc_init_flag =1;
//read calibration data from EFUSE
mt_auxadc_hal_init();
exit:
return ret;
}
/*
 * mt_auxadc_remove - platform remove hook.  Only logs; the char device,
 * class node and sysfs attributes created in probe are not torn down here.
 */
static int mt_auxadc_remove(struct platform_device *dev)
{
	printk("******** MT auxadc driver remove!! ********\n" );
	return 0;
}
/* mt_auxadc_shutdown - platform shutdown hook; log-only, no hardware quiesce. */
static void mt_auxadc_shutdown(struct platform_device *dev)
{
	printk("******** MT auxadc driver shutdown!! ********\n" );
}
/*
 * mt_auxadc_suspend - legacy (pre-dev_pm_ops) suspend callback.
 * Clock handling is delegated to mt_auxadc_hal_suspend(); the direct
 * disable_clock() call below was intentionally disabled.
 */
static int mt_auxadc_suspend(struct platform_device *dev, pm_message_t state)
{
	//printk("******** MT auxadc driver suspend!! ********\n" );
	/*
	if(disable_clock(MT_PDN_PERI_AUXADC,"AUXADC"))
	printk("hwEnableClock AUXADC failed.");
	*/
	mt_auxadc_hal_suspend();
	return 0;
}
/*
 * mt_auxadc_resume - legacy resume callback; mirrors mt_auxadc_suspend().
 * Clock re-enable is delegated to mt_auxadc_hal_resume(); the direct
 * enable_clock() retry logic below was intentionally disabled.
 */
static int mt_auxadc_resume(struct platform_device *dev)
{
	//printk("******** MT auxadc driver resume!! ********\n" );
	/*
	if(enable_clock(MT_PDN_PERI_AUXADC,"AUXADC"))
	{
	printk("hwEnableClock AUXADC again!!!.");
	if(enable_clock(MT_PDN_PERI_AUXADC,"AUXADC"))
	{printk("hwEnableClock AUXADC failed.");}
	}
	*/
	mt_auxadc_hal_resume();
	return 0;
}
/*
 * Platform-driver glue.  suspend/resume are the legacy (pre-dev_pm_ops)
 * callbacks and are only wired in when CONFIG_PM is enabled; the "mt-auxadc"
 * name must match the platform device registered by the board code.
 */
static struct platform_driver mt_auxadc_driver = {
	.probe = mt_auxadc_probe,
	.remove = mt_auxadc_remove,
	.shutdown = mt_auxadc_shutdown,
#ifdef CONFIG_PM
	.suspend = mt_auxadc_suspend,
	.resume = mt_auxadc_resume,
#endif
	.driver = {
		.name = "mt-auxadc",
	},
};
/* Module entry point: register the AUXADC platform driver with the core. */
static int __init mt_auxadc_init(void)
{
	int rc = platform_driver_register(&mt_auxadc_driver);

	if (rc != 0) {
		printk("****[mt_auxadc_driver] Unable to register driver (%d)\n", rc);
		return rc;
	}

	printk("****[mt_auxadc_driver] Initialization : DONE \n");
	return 0;
}
/*
 * Module exit point.
 * Fix: undo mt_auxadc_init() - the original left the platform driver
 * registered after module unload, leaving the kernel with callbacks into
 * freed module text.
 */
static void __exit mt_auxadc_exit (void)
{
	platform_driver_unregister(&mt_auxadc_driver);
}
module_init(mt_auxadc_init);
module_exit(mt_auxadc_exit);
MODULE_AUTHOR("MTK");
MODULE_DESCRIPTION("MTK AUXADC Device Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kraj/gcc | gcc/testsuite/gfortran.dg/argument_checking_13.f90 | 12 | 2994 | ! { dg-do compile }
!
! PR fortran/34796
!
! Argument checks:
! - elements of deferred-shape arrays (= non-dummies) are allowed
! as the memory is contiguous
! - while assumed-shape arrays (= dummy arguments) and pointers are
! not (strides can make them non-contiguous)
! and
! - if the memory is non-contigous, character arguments have as
! storage size only the size of the element itself, check for
! too short actual arguments.
!
! test1: passing a single array *element* where rlv1 expects an
! explicit-shape real array.  An element of an allocatable
! (deferred-shape) array is backed by contiguous storage and is legal;
! assumed-shape dummies and pointers may be strided, so those calls are
! diagnosed (see the dg-error annotations below).
subroutine test1(assumed_sh_dummy, pointer_dummy)
implicit none
interface
subroutine rlv1(y)
real :: y(3)
end subroutine rlv1
end interface
real :: assumed_sh_dummy(:,:,:)
real, pointer :: pointer_dummy(:,:,:)
real, allocatable :: deferred(:,:,:)
real, pointer :: ptr(:,:,:)
call rlv1(deferred(1,1,1)) ! valid since contiguous
call rlv1(ptr(1,1,1)) ! { dg-error "Element of assumed-shape or pointer array" }
call rlv1(assumed_sh_dummy(1,1,1)) ! { dg-error "Element of assumed-shape or pointer array" }
call rlv1(pointer_dummy(1,1,1)) ! { dg-error "Element of assumed-shape or pointer array" }
end
! test2: same shapes as test1 but with character(3) elements.  Under the
! F2003 sequence-association rules for character actual arguments these
! element calls are valid; only explicit substrings that leave fewer
! characters than rlv2 requires are rejected.
subroutine test2(assumed_sh_dummy, pointer_dummy)
implicit none
interface
subroutine rlv2(y)
character :: y(3)
end subroutine rlv2
end interface
character(3) :: assumed_sh_dummy(:,:,:)
character(3), pointer :: pointer_dummy(:,:,:)
character(3), allocatable :: deferred(:,:,:)
character(3), pointer :: ptr(:,:,:)
call rlv2(deferred(1,1,1)) ! Valid since contiguous
call rlv2(ptr(1,1,1)) ! Valid F2003
call rlv2(assumed_sh_dummy(1,1,1)) ! Valid F2003
call rlv2(pointer_dummy(1,1,1)) ! Valid F2003
! The following is kind of ok: The memory access it valid
! We warn nonetheless as the result is not what is intented
! and also formally wrong.
! Using (1:string_length) would be ok.
call rlv2(ptr(1,1,1)(1:1)) ! { dg-error "contains too few elements" }
call rlv2(assumed_sh_dummy(1,1,1)(1:2)) ! { dg-error "contains too few elements" }
call rlv2(pointer_dummy(1,1,1)(1:3)) ! Valid F2003
end
! test3: character(2) elements passed to rlv3, which needs 3 characters of
! storage.  Only the contiguous allocatable provides enough following
! storage; every non-contiguous element (and every 2-char substring of one)
! must be rejected as "too few elements".
subroutine test3(assumed_sh_dummy, pointer_dummy)
implicit none
interface
subroutine rlv3(y)
character :: y(3)
end subroutine rlv3
end interface
character(2) :: assumed_sh_dummy(:,:,:)
character(2), pointer :: pointer_dummy(:,:,:)
character(2), allocatable :: deferred(:,:,:)
character(2), pointer :: ptr(:,:,:)
call rlv3(deferred(1,1,1)) ! Valid since contiguous
call rlv3(ptr(1,1,1)) ! { dg-error "contains too few elements" }
call rlv3(assumed_sh_dummy(1,1,1)) ! { dg-error "contains too few elements" }
call rlv3(pointer_dummy(1,1,1)) ! { dg-error "contains too few elements" }
call rlv3(deferred(1,1,1)(1:2)) ! Valid since contiguous
call rlv3(ptr(1,1,1)(1:2)) ! { dg-error "contains too few elements" }
call rlv3(assumed_sh_dummy(1,1,1)(1:2)) ! { dg-error "contains too few elements" }
call rlv3(pointer_dummy(1,1,1)(1:2)) ! { dg-error "contains too few elements" }
end
| gpl-2.0 |
tpltnt/clamav-devel | win32/clamav-for-windows/sigui/wxWidgets-2.9.1/src/os2/button.cpp | 12 | 13657 | /////////////////////////////////////////////////////////////////////////////
// Name: src/os2/button.cpp
// Purpose: wxButton
// Author: David Webster
// Modified by:
// Created: 10/13/99
// RCS-ID: $Id$
// Copyright: (c) David Webster
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#include "wx/button.h"
#ifndef WX_PRECOMP
#include "wx/app.h"
#include "wx/brush.h"
#include "wx/panel.h"
#include "wx/bmpbuttn.h"
#include "wx/settings.h"
#include "wx/dcscreen.h"
#include "wx/scrolwin.h"
#include "wx/toplevel.h"
#endif
#include "wx/stockitem.h"
#include "wx/os2/private.h"
#define BUTTON_HEIGHT_FROM_CHAR_HEIGHT(cy) (11*EDIT_HEIGHT_FROM_CHAR_HEIGHT(cy)/10)
//
// Should be at the very least less than winDEFAULT_BUTTON_MARGIN
//
#define FOCUS_MARGIN 3
#ifndef BST_CHECKED
#define BST_CHECKED 0x0001
#endif
IMPLEMENT_DYNAMIC_CLASS(wxButton, wxControl)
// Button
// Create the underlying OS/2 PM push-button window.
// Stock ids get their standard label when rsLbl is empty; mnemonics in the
// label are converted to PM format.  Returns false if WinCreateWindow fails.
bool wxButton::Create( wxWindow* pParent,
                       wxWindowID vId,
                       const wxString& rsLbl,
                       const wxPoint& rPos,
                       const wxSize& rSize,
                       long lStyle,
                       const wxValidator& rValidator,
                       const wxString& rsName)
{
    wxString rsLabel(rsLbl);
    if (rsLabel.empty() && wxIsStockID(vId))
        rsLabel = wxGetStockLabel(vId);

    wxString sLabel = ::wxPMTextToLabel(rsLabel);

    SetName(rsName);
#if wxUSE_VALIDATORS
    SetValidator(rValidator);
#endif
    m_windowStyle = lStyle;
    pParent->AddChild((wxButton *)this);
    if (vId == -1)
        m_windowId = NewControlId();
    else
        m_windowId = vId;

    lStyle = WS_VISIBLE | WS_TABSTOP | BS_PUSHBUTTON;

    //
    // OS/2 PM does not have Right/Left/Top/Bottom styles.
    // We will have to define an additional style when we implement notebooks
    // for a notebook page button
    //
    if (m_windowStyle & wxCLIP_SIBLINGS )
        lStyle |= WS_CLIPSIBLINGS;
    m_hWnd = (WXHWND)::WinCreateWindow( GetHwndOf(pParent) // Parent handle
                                       ,WC_BUTTON          // A Button class window
                                       ,sLabel.c_str()     // Button text
                                       ,lStyle             // Button style
                                       ,0, 0, 0, 0         // Location and size
                                       ,GetHwndOf(pParent) // Owner handle
                                       ,HWND_TOP           // Top of Z-Order
                                       ,vId                // Identifier
                                       ,NULL               // No control data
                                       ,NULL               // No Presentation parameters
                                      );
    if (m_hWnd == 0)
    {
        return false;
    }

    //
    // Subclass again for purposes of dialog editing mode
    //
    SubclassWin(m_hWnd);

    //
    // Fix: use an automatic wxFont instead of new/delete - the heap
    // allocation bought nothing and would leak if SetFont() threw.
    //
    wxFont vButtonFont( 8
                       ,wxSWISS
                       ,wxNORMAL
                       ,wxNORMAL
                      );
    SetFont(vButtonFont);

    SetXComp(0);
    SetYComp(0);
    SetSize( rPos.x
            ,rPos.y
            ,rSize.x
            ,rSize.y
           );
    return true;
} // end of wxButton::Create
// Destructor: if the parent top-level window still considers this button
// its default item, reset that pointer so the frame is not left referring
// to a destroyed control.
wxButton::~wxButton()
{
    wxTopLevelWindow* pOwner = wxDynamicCast(wxGetTopLevelParent(this), wxTopLevelWindow);

    if (pOwner && pOwner->GetDefaultItem() == this)
        pOwner->SetDefaultItem(NULL);
} // end of wxButton::~wxButton
// ----------------------------------------------------------------------------
// size management including autosizing
// ----------------------------------------------------------------------------
// Compute this button's preferred size: label extent plus character-based
// margins, clamped up to GetDefaultSize() unless wxBU_EXACTFIT is set.
wxSize wxButton::DoGetBestSize() const
{
    wxString rsLabel = wxGetWindowText(GetHWND());
    int nWidthButton;
    int nWidthChar;
    int nHeightChar;
    wxFont vFont = (wxFont)GetFont();

    // Width of the label text in the button's current font.
    GetTextExtent( rsLabel
                  ,&nWidthButton
                  ,NULL
                 );
    wxGetCharSize( GetHWND()
                  ,&nWidthChar
                  ,&nHeightChar
                  ,&vFont
                 );

    //
    // Add a margin - the button is wider than just its label
    //
    nWidthButton += 3 * nWidthChar;

    //
    // The button height is proportional to the height of the font used
    //
    int nHeightButton = BUTTON_HEIGHT_FROM_CHAR_HEIGHT(nHeightChar);

    //
    // Need a little extra to make it look right
    //
    nHeightButton += (int)(nHeightChar/1.5);

    // Unless the caller asked for an exact fit, never go below the
    // platform's standard button size in either dimension.
    if (!HasFlag(wxBU_EXACTFIT))
    {
        wxSize vSize = GetDefaultSize();
        if (nWidthButton > vSize.x)
            vSize.x = nWidthButton;
        if (nHeightButton > vSize.y)
            vSize.y = nHeightButton;
        return vSize;
    }
    return wxSize( nWidthButton
                  ,nHeightButton
                 );
} // end of wxButton::DoGetBestSize
/* static */
// Return the platform-standard button size in pixels.  The value is
// computed once from the default GUI font and cached in a function-local
// static for subsequent calls.
wxSize wxButton::GetDefaultSize()
{
    static wxSize vSizeBtn;   // cache: (0,0) until first computed

    if (vSizeBtn.x == 0)
    {
        wxScreenDC vDc;
        vDc.SetFont(wxSystemSettings::GetFont(wxSYS_DEFAULT_GUI_FONT));

        //
        // The size of a standard button in the dialog units is 50x14,
        // translate this to pixels
        // NB1: the multipliers come from the Windows convention
        // NB2: the extra +1/+2 were needed to get the size be the same as the
        // size of the buttons in the standard dialog - I don't know how
        // this happens, but on my system this size is 75x23 in pixels and
        // 23*8 isn't even divisible by 14... Would be nice to understand
        // why these constants are needed though!
        vSizeBtn.x = (50 * (vDc.GetCharWidth() + 1))/4;
        vSizeBtn.y = ((14 * vDc.GetCharHeight()) + 2)/8;
    }
    return vSizeBtn;
} // end of wxButton::GetDefaultSize
// Programmatically "click" the button: route the supplied command event
// through ProcessCommand() just as a real BN_CLICKED notification would be.
void wxButton::Command (
  wxCommandEvent& rEvent
)
{
    ProcessCommand (rEvent);
} // end of wxButton::Command
// ----------------------------------------------------------------------------
// helpers
// ----------------------------------------------------------------------------
bool wxButton::SendClickEvent()
{
wxCommandEvent vEvent( wxEVT_COMMAND_BUTTON_CLICKED
,GetId()
);
vEvent.SetEventObject(this);
return ProcessCommand(vEvent);
} // end of wxButton::SendClickEvent
// Make this button the default one, both for wxWidgets and for the native
// window; returns the previously default window.
wxWindow *wxButton::SetDefault()
{
    // Register with the base class first, remembering the old default.
    wxWindow* pPreviousDefault = wxButtonBase::SetDefault();

    // Visually demote the old default button, then promote ourselves.
    SetDefaultStyle(wxDynamicCast(pPreviousDefault, wxButton), false);
    SetDefaultStyle(this, true);

    return pPreviousDefault;
} // end of wxButton::SetDefault
void wxButton::SetTmpDefault()
{
wxTopLevelWindow *tlw = wxDynamicCast(wxGetTopLevelParent(this), wxTopLevelWindow);
wxCHECK_RET( tlw, wxT("button without top level window?") );
wxWindow* pWinOldDefault = tlw->GetDefaultItem();
tlw->SetTmpDefaultItem(this);
SetDefaultStyle( wxDynamicCast(pWinOldDefault, wxButton), false);
SetDefaultStyle( this, true );
} // end of wxButton::SetTmpDefault
void wxButton::UnsetTmpDefault()
{
wxTopLevelWindow *tlw = wxDynamicCast(wxGetTopLevelParent(this), wxTopLevelWindow);
wxCHECK_RET( tlw, wxT("button without top level window?") );
tlw->SetTmpDefaultItem(NULL);
wxWindow* pWinOldDefault = tlw->GetDefaultItem();
SetDefaultStyle( this, false );
SetDefaultStyle( wxDynamicCast(pWinOldDefault, wxButton), true );
} // end of wxButton::UnsetTmpDefault
// Turn the native BS_DEFAULT appearance of pBtn on or off.  Safe to call
// with pBtn == NULL (no-op).  On OS/2 there is no real per-dialog default
// button, so "becoming default" is simulated by taking focus.
void wxButton::SetDefaultStyle(
  wxButton* pBtn
, bool bOn
)
{
    long lStyle;
    //
    // We may be called with NULL pointer -- simpler to do the check here than
    // in the caller which does wxDynamicCast()
    //
    if (!pBtn)
        return;

    //
    // First, let DefDlgProc() know about the new default button
    //
    if (bOn)
    {
        // Don't steal focus while the application is in the background.
        if (!wxTheApp->IsActive())
            return;

        //
        // In OS/2 the dialog/panel doesn't really know it has a default
        // button, the default button simply has that style. We'll just
        // simulate by setting focus to it
        //
        pBtn->SetFocus();
    }
    lStyle = ::WinQueryWindowULong(GetHwndOf(pBtn), QWL_STYLE);
    // Only touch the window style when the current BS_DEFAULT state
    // differs from the requested one ("currently off" == "want on").
    if (!(lStyle & BS_DEFAULT) == bOn)
    {
        if ((lStyle & BS_USERBUTTON) != BS_USERBUTTON)
        {
            if (bOn)
                lStyle |= BS_DEFAULT;
            else
                lStyle &= ~BS_DEFAULT;
            ::WinSetWindowULong(GetHwndOf(pBtn), QWL_STYLE, lStyle);
        }
        else
        {
            //
            // Redraw the button - it will notice itself that it's not the
            // default one any longer
            //
            pBtn->Refresh();
        }
    }
} // end of wxButton::SetDefaultStyle
// ----------------------------------------------------------------------------
// event/message handlers
// ----------------------------------------------------------------------------
// Translate native button notifications into a wx click event.  Plain
// buttons send BN_CLICKED; owner-drawn ones may also send BN_DBLCLICKED.
// Returns true if the notification was handled.
bool wxButton::OS2Command(WXUINT uParam, WXWORD WXUNUSED(wId))
{
    switch (uParam)
    {
        case BN_CLICKED:     // normal buttons send this
        case BN_DBLCLICKED:  // owner-drawn ones also send this
            return SendClickEvent();

        default:
            return false;
    }
} // end of wxButton::OS2Command
// Supply the brush used to paint the button's background; the brush is
// obtained from (and owned by) the global wxTheBrushList cache.
WXHBRUSH wxButton::OnCtlColor( WXHDC    WXUNUSED(pDC),
                               WXHWND   WXUNUSED(pWnd),
                               WXUINT   WXUNUSED(nCtlColor),
                               WXUINT   WXUNUSED(uMessage),
                               WXWPARAM WXUNUSED(wParam),
                               WXLPARAM WXUNUSED(lParam) )
{
    wxBrush* pBackgroundBrush = wxTheBrushList->FindOrCreateBrush( GetBackgroundColour()
                                                                  ,wxSOLID
                                                                 );

    return (WXHBRUSH)pBackgroundBrush->GetResourceHandle();
} // end of wxButton::OnCtlColor
// Mark the underlying PM window as a user (owner-drawn) button by adding
// BS_USERBUTTON to its style, if not already present.
void wxButton::MakeOwnerDrawn()
{
    long lCurStyle = ::WinQueryWindowULong(GetHwnd(), QWL_STYLE);

    if ((lCurStyle & BS_USERBUTTON) != BS_USERBUTTON)
        ::WinSetWindowULong(GetHwnd(), QWL_STYLE, lCurStyle | BS_USERBUTTON);
} // end of wxButton::MakeOwnerDrawn
// Map wx window styles to native PM styles for this button, stripping any
// requested border (buttons draw their own) and forcing WS_CLIPSIBLINGS.
WXDWORD wxButton::OS2GetStyle(
  long lStyle
, WXDWORD* pdwExstyle
) const
{
    //
    // Buttons never have an external border, they draw their own one
    //
    WXDWORD dwStyle = wxControl::OS2GetStyle( (lStyle & ~wxBORDER_MASK) | wxBORDER_NONE
                                             ,pdwExstyle
                                            );

    //
    // We must use WS_CLIPSIBLINGS with the buttons or they would draw over
    // each other in any resizeable dialog which has more than one button in
    // the bottom
    //
    dwStyle |= WS_CLIPSIBLINGS;
    return dwStyle;
} // end of wxButton::OS2GetStyle
// Window procedure: intercepts focus changes (to manage the temporary
// default button) and double clicks (to refresh owner-drawn buttons),
// then always defers to the base class for the real processing.
MRESULT wxButton::WindowProc( WXUINT   uMsg,
                              WXWPARAM wParam,
                              WXLPARAM lParam )
{
    //
    // When we receive focus, we want to temporarily become the default button in
    // our parent panel so that pressing "Enter" would activate us -- and when
    // losing it we should restore the previous default button as well
    //
    if (uMsg == WM_SETFOCUS)
    {
        // On OS/2 WM_SETFOCUS carries a gain/lose flag in lParam.
        if (SHORT1FROMMP(lParam) == TRUE)
            SetTmpDefault();
        else
            UnsetTmpDefault();

        //
        // Let the default processing take place too
        //
    }
    else if (uMsg == WM_BUTTON1DBLCLK)
    {
        //
        // Emulate a click event to force an owner-drawn button to change its
        // appearance - without this, it won't do it
        //
        (void)wxControl::OS2WindowProc( WM_BUTTON1DOWN
                                       ,wParam
                                       ,lParam
                                      );

        //
        // And continue with processing the message normally as well
        //
    }

    //
    // Let the base class do all real processing
    //
    return (wxControl::OS2WindowProc( uMsg
                                     ,wParam
                                     ,lParam
                                    ));
} // end of wxWindowProc
| gpl-2.0 |
hoxton22/CriseHearth | src/server/game/Handlers/TradeHandler.cpp | 12 | 27317 | /*
* Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Common.h"
#include "WorldPacket.h"
#include "WorldSession.h"
#include "World.h"
#include "ObjectAccessor.h"
#include "Log.h"
#include "Player.h"
#include "Item.h"
#include "Spell.h"
#include "SocialMgr.h"
#include "Language.h"
#include "AccountMgr.h"
#include "TradePackets.h"
#include "TradeData.h"
// Send a trade status packet to this session.  The packet object is
// cleared first so a caller-supplied instance can be reused, and the
// same-Battle.net-account flag is refreshed from the current trade partner
// (false when there is no partner).
void WorldSession::SendTradeStatus(WorldPackets::Trade::TradeStatus& info)
{
    info.Clear();   // reuse packet
    Player* trader = _player->GetTrader();
    info.PartnerIsSameBnetAccount = trader && trader->GetSession()->GetBattlenetAccountId() == GetBattlenetAccountId();
    SendPacket(info.Write());
}
// Client notification that the player ignored a trade request.
// Intentionally a no-op: the server keeps no state for ignored requests.
void WorldSession::HandleIgnoreTradeOpcode(WorldPackets::Trade::IgnoreTrade& /*ignoreTrade*/)
{
}

// Client notification that the player is busy for a trade request.
// Intentionally a no-op.
void WorldSession::HandleBusyTradeOpcode(WorldPackets::Trade::BusyTrade& /*busyTrade*/)
{
}
void WorldSession::SendUpdateTrade(bool trader_data /*= true*/)
{
TradeData* view_trade = trader_data ? _player->GetTradeData()->GetTraderData() : _player->GetTradeData();
WorldPackets::Trade::TradeUpdated tradeUpdated;
tradeUpdated.WhichPlayer = trader_data;
tradeUpdated.ClientStateIndex = view_trade->GetClientStateIndex();
tradeUpdated.CurrentStateIndex = view_trade->GetServerStateIndex();
tradeUpdated.Gold = view_trade->GetMoney();
tradeUpdated.ProposedEnchantment = view_trade->GetSpell();
for (uint8 i = 0; i < TRADE_SLOT_COUNT; ++i)
{
if (Item* item = view_trade->GetItem(TradeSlots(i)))
{
WorldPackets::Trade::TradeUpdated::TradeItem tradeItem;
tradeItem.Slot = i;
tradeItem.EntryID = item->GetEntry();
tradeItem.StackCount = item->GetCount();
tradeItem.GiftCreator = item->GetGuidValue(ITEM_FIELD_GIFTCREATOR);
if (!item->HasFlag(ITEM_FIELD_FLAGS, ITEM_FIELD_FLAG_WRAPPED))
{
tradeItem.Unwrapped = boost::in_place();
tradeItem.Unwrapped->Item.Initialize(item);
tradeItem.Unwrapped->EnchantID = item->GetEnchantmentId(PERM_ENCHANTMENT_SLOT);
tradeItem.Unwrapped->OnUseEnchantmentID = item->GetEnchantmentId(USE_ENCHANTMENT_SLOT);
tradeItem.Unwrapped->Creator = item->GetGuidValue(ITEM_FIELD_CREATOR);
tradeItem.Unwrapped->Charges = item->GetSpellCharges();
tradeItem.Unwrapped->Lock = item->GetTemplate()->GetLockID() && !item->HasFlag(ITEM_FIELD_FLAGS, ITEM_FIELD_FLAG_UNLOCKED);
tradeItem.Unwrapped->MaxDurability = item->GetUInt32Value(ITEM_FIELD_MAXDURABILITY);
tradeItem.Unwrapped->Durability = item->GetUInt32Value(ITEM_FIELD_DURABILITY);
for (uint32 s = SOCK_ENCHANTMENT_SLOT; s < MAX_GEM_SOCKETS; ++s)
tradeItem.Unwrapped->SocketEnchant[s] = item->GetEnchantmentId(EnchantmentSlot(s + SOCK_ENCHANTMENT_SLOT));
}
tradeUpdated.Items.push_back(tradeItem);
}
}
SendPacket(tradeUpdated.Write());
}
//==============================================================
// transfer the items to the players
/*
 * Move the already-removed trade items into their new owners' inventories.
 * Both directions of each slot are storability-checked *before* anything is
 * stored, because once an item is placed there is no rollback; when either
 * side fails the pre-check, the items are returned to their original
 * owners instead.  GM trades are logged when the RBAC permission is set.
 */
void WorldSession::moveItems(Item* myItems[], Item* hisItems[])
{
    Player* trader = _player->GetTrader();
    if (!trader)
        return;

    for (uint8 i = 0; i < TRADE_SLOT_TRADED_COUNT; ++i)
    {
        ItemPosCountVec traderDst;
        ItemPosCountVec playerDst;
        // Dry-run: can each destination inventory accept its item?
        bool traderCanTrade = (myItems[i] == NULL || trader->CanStoreItem(NULL_BAG, NULL_SLOT, traderDst, myItems[i], false) == EQUIP_ERR_OK);
        bool playerCanTrade = (hisItems[i] == NULL || _player->CanStoreItem(NULL_BAG, NULL_SLOT, playerDst, hisItems[i], false) == EQUIP_ERR_OK);
        if (traderCanTrade && playerCanTrade)
        {
            // Ok, if trade item exists and can be stored
            // If we trade in both directions we had to check, if the trade will work before we actually do it
            // A roll back is not possible after we stored it
            if (myItems[i])
            {
                // logging
                TC_LOG_DEBUG("network", "partner storing: %s", myItems[i]->GetGUID().ToString().c_str());
                if (HasPermission(rbac::RBAC_PERM_LOG_GM_TRADE))
                {
                    sLog->outCommand(_player->GetSession()->GetAccountId(), "GM %s (Account: %u) trade: %s (Entry: %d Count: %u) to player: %s (Account: %u)",
                        _player->GetName().c_str(), _player->GetSession()->GetAccountId(),
                        myItems[i]->GetTemplate()->GetDefaultLocaleName(), myItems[i]->GetEntry(), myItems[i]->GetCount(),
                        trader->GetName().c_str(), trader->GetSession()->GetAccountId());
                }

                // adjust time (depends on /played)
                if (myItems[i]->HasFlag(ITEM_FIELD_FLAGS, ITEM_FIELD_FLAG_BOP_TRADEABLE))
                    myItems[i]->SetUInt32Value(ITEM_FIELD_CREATE_PLAYED_TIME, trader->GetTotalPlayedTime()-(_player->GetTotalPlayedTime()-myItems[i]->GetUInt32Value(ITEM_FIELD_CREATE_PLAYED_TIME)));
                // store
                trader->MoveItemToInventory(traderDst, myItems[i], true, true);
            }
            if (hisItems[i])
            {
                // logging
                TC_LOG_DEBUG("network", "player storing: %s", hisItems[i]->GetGUID().ToString().c_str());
                if (HasPermission(rbac::RBAC_PERM_LOG_GM_TRADE))
                {
                    sLog->outCommand(trader->GetSession()->GetAccountId(), "GM %s (Account: %u) trade: %s (Entry: %d Count: %u) to player: %s (Account: %u)",
                        trader->GetName().c_str(), trader->GetSession()->GetAccountId(),
                        hisItems[i]->GetTemplate()->GetDefaultLocaleName(), hisItems[i]->GetEntry(), hisItems[i]->GetCount(),
                        _player->GetName().c_str(), _player->GetSession()->GetAccountId());
                }

                // adjust time (depends on /played)
                if (hisItems[i]->HasFlag(ITEM_FIELD_FLAGS, ITEM_FIELD_FLAG_BOP_TRADEABLE))
                    hisItems[i]->SetUInt32Value(ITEM_FIELD_CREATE_PLAYED_TIME, _player->GetTotalPlayedTime()-(trader->GetTotalPlayedTime()-hisItems[i]->GetUInt32Value(ITEM_FIELD_CREATE_PLAYED_TIME)));
                // store
                _player->MoveItemToInventory(playerDst, hisItems[i], true, true);
            }
        }
        else
        {
            // in case of fatal error log error message
            // return the already removed items to the original owner
            if (myItems[i])
            {
                if (!traderCanTrade)
                    TC_LOG_ERROR("network", "trader can't store item: %s", myItems[i]->GetGUID().ToString().c_str());
                if (_player->CanStoreItem(NULL_BAG, NULL_SLOT, playerDst, myItems[i], false) == EQUIP_ERR_OK)
                    _player->MoveItemToInventory(playerDst, myItems[i], true, true);
                else
                    TC_LOG_ERROR("network", "player can't take item back: %s", myItems[i]->GetGUID().ToString().c_str());
            }
            // return the already removed items to the original owner
            if (hisItems[i])
            {
                if (!playerCanTrade)
                    TC_LOG_ERROR("network", "player can't store item: %s", hisItems[i]->GetGUID().ToString().c_str());
                if (trader->CanStoreItem(NULL_BAG, NULL_SLOT, traderDst, hisItems[i], false) == EQUIP_ERR_OK)
                    trader->MoveItemToInventory(traderDst, hisItems[i], true, true);
                else
                    TC_LOG_ERROR("network", "trader can't take item back: %s", hisItems[i]->GetGUID().ToString().c_str());
            }
        }
    }
}
//==============================================================
// Enter the accept phase: mark both TradeData objects as in-process and
// snapshot each side's offered items into the caller's arrays, flagging
// every item as locked-in-trade.
static void setAcceptTradeMode(TradeData* myTrade, TradeData* hisTrade, Item* *myItems, Item* *hisItems)
{
    myTrade->SetInAcceptProcess(true);
    hisTrade->SetInAcceptProcess(true);

    for (uint8 slot = 0; slot < TRADE_SLOT_TRADED_COUNT; ++slot)
    {
        Item* mine = myTrade->GetItem(TradeSlots(slot));
        if (mine)
        {
            TC_LOG_DEBUG("network", "player trade %s bag: %u slot: %u", mine->GetGUID().ToString().c_str(), mine->GetBagSlot(), mine->GetSlot());
            myItems[slot] = mine;   // GetItem() can return NULL; guarded above
            mine->SetInTrade();
        }

        Item* theirs = hisTrade->GetItem(TradeSlots(slot));
        if (theirs)
        {
            TC_LOG_DEBUG("network", "partner trade %s bag: %u slot: %u", theirs->GetGUID().ToString().c_str(), theirs->GetBagSlot(), theirs->GetSlot());
            hisItems[slot] = theirs;
            theirs->SetInTrade();
        }
    }
}
// Leave the accept phase on both sides of the trade.
static void clearAcceptTradeMode(TradeData* myTrade, TradeData* hisTrade)
{
    myTrade->SetInAcceptProcess(false);
    hisTrade->SetInAcceptProcess(false);
}
// Remove the locked-in-trade flag from every snapshotted item on both sides.
static void clearAcceptTradeMode(Item* *myItems, Item* *hisItems)
{
    for (uint8 slot = 0; slot < TRADE_SLOT_TRADED_COUNT; ++slot)
    {
        if (myItems[slot] != NULL)
            myItems[slot]->SetInTrade(false);

        if (hisItems[slot] != NULL)
            hisItems[slot]->SetInTrade(false);
    }
}
/*
 * Client pressed "Trade" (accept) in the trade window.
 *
 * Validates the offer on both sides (state index, distance, money limits,
 * item tradability). If only this side has accepted so far, the partner is
 * merely notified; once BOTH sides have accepted, the exchange of items,
 * money and any applied trade spell is executed atomically, the TradeData
 * objects are destroyed and both inventories are saved in one transaction.
 */
void WorldSession::HandleAcceptTradeOpcode(WorldPackets::Trade::AcceptTrade& acceptTrade)
{
TradeData* my_trade = _player->m_trade;
if (!my_trade)
return;
Player* trader = my_trade->GetTrader();
TradeData* his_trade = trader->m_trade;
if (!his_trade)
return;
// Local snapshots of the traded items, filled by setAcceptTradeMode().
Item* myItems[TRADE_SLOT_TRADED_COUNT] = { NULL, NULL, NULL, NULL, NULL, NULL };
Item* hisItems[TRADE_SLOT_TRADED_COUNT] = { NULL, NULL, NULL, NULL, NULL, NULL };
// set before checks for propertly undo at problems (it already set in to client)
my_trade->SetAccepted(true);
WorldPackets::Trade::TradeStatus info;
// The client echoes the state index it accepted against; if the offer
// changed server-side in the meantime, force the client to re-accept.
if (his_trade->GetServerStateIndex() != acceptTrade.StateIndex)
{
info.Status = TRADE_STATUS_STATE_CHANGED;
SendTradeStatus(info);
my_trade->SetAccepted(false);
return;
}
if (!_player->IsWithinDistInMap(trader, TRADE_DISTANCE, false))
{
info.Status = TRADE_STATUS_TOO_FAR_AWAY;
SendTradeStatus(info);
my_trade->SetAccepted(false);
return;
}
// not accept case incorrect money amount
if (!_player->HasEnoughMoney(my_trade->GetMoney()))
{
info.Status = TRADE_STATUS_FAILED;
info.BagResult = EQUIP_ERR_NOT_ENOUGH_MONEY;
SendTradeStatus(info);
my_trade->SetAccepted(false, true);
return;
}
// not accept case incorrect money amount
if (!trader->HasEnoughMoney(his_trade->GetMoney()))
{
info.Status = TRADE_STATUS_FAILED;
info.BagResult = EQUIP_ERR_NOT_ENOUGH_MONEY;
trader->GetSession()->SendTradeStatus(info);
his_trade->SetAccepted(false, true);
return;
}
// Reject if receiving the partner's money would push us over the gold cap.
if (_player->GetMoney() >= uint64(MAX_MONEY_AMOUNT) - his_trade->GetMoney())
{
info.Status = TRADE_STATUS_FAILED;
info.BagResult = EQUIP_ERR_TOO_MUCH_GOLD;
SendTradeStatus(info);
my_trade->SetAccepted(false, true);
return;
}
if (trader->GetMoney() >= uint64(MAX_MONEY_AMOUNT) - my_trade->GetMoney())
{
info.Status = TRADE_STATUS_FAILED;
info.BagResult = EQUIP_ERR_TOO_MUCH_GOLD;
trader->GetSession()->SendTradeStatus(info);
his_trade->SetAccepted(false, true);
return;
}
// not accept if some items now can't be trade (cheating)
for (uint8 i = 0; i < TRADE_SLOT_TRADED_COUNT; ++i)
{
if (Item* item = my_trade->GetItem(TradeSlots(i)))
{
if (!item->CanBeTraded(false, true))
{
info.Status = TRADE_STATUS_CANCELLED;
SendTradeStatus(info);
return;
}
if (item->IsBindedNotWith(trader))
{
info.Status = TRADE_STATUS_FAILED;
info.BagResult = EQUIP_ERR_TRADE_BOUND_ITEM;
SendTradeStatus(info);
return;
}
}
if (Item* item = his_trade->GetItem(TradeSlots(i)))
{
if (!item->CanBeTraded(false, true))
{
info.Status = TRADE_STATUS_CANCELLED;
SendTradeStatus(info);
return;
}
//if (item->IsBindedNotWith(_player)) // dont mark as invalid when his item isnt good (not exploitable because if item is invalid trade will fail anyway later on the same check)
//{
// SendTradeStatus(TRADE_STATUS_NOT_ELIGIBLE);
// his_trade->SetAccepted(false, true);
// return;
//}
}
}
// Both sides have accepted: execute the exchange.
if (his_trade->IsAccepted())
{
setAcceptTradeMode(my_trade, his_trade, myItems, hisItems);
// Spells cast on the non-traded slot (e.g. enchants); owned here until
// prepare() or deleted on every failure path below.
Spell* my_spell = NULL;
SpellCastTargets my_targets;
Spell* his_spell = NULL;
SpellCastTargets his_targets;
// not accept if spell can't be cast now (cheating)
if (uint32 my_spell_id = my_trade->GetSpell())
{
SpellInfo const* spellEntry = sSpellMgr->GetSpellInfo(my_spell_id);
Item* castItem = my_trade->GetSpellCastItem();
if (!spellEntry || !his_trade->GetItem(TRADE_SLOT_NONTRADED) ||
(my_trade->HasSpellCastItem() && !castItem))
{
clearAcceptTradeMode(my_trade, his_trade);
clearAcceptTradeMode(myItems, hisItems);
my_trade->SetSpell(0);
return;
}
my_spell = new Spell(_player, spellEntry, TRIGGERED_FULL_MASK);
my_spell->m_CastItem = castItem;
my_targets.SetTradeItemTarget(_player);
my_spell->m_targets = my_targets;
SpellCastResult res = my_spell->CheckCast(true);
if (res != SPELL_CAST_OK)
{
my_spell->SendCastResult(res);
clearAcceptTradeMode(my_trade, his_trade);
clearAcceptTradeMode(myItems, hisItems);
delete my_spell;
my_trade->SetSpell(0);
return;
}
}
// not accept if spell can't be cast now (cheating)
if (uint32 his_spell_id = his_trade->GetSpell())
{
SpellInfo const* spellEntry = sSpellMgr->GetSpellInfo(his_spell_id);
Item* castItem = his_trade->GetSpellCastItem();
if (!spellEntry || !my_trade->GetItem(TRADE_SLOT_NONTRADED) || (his_trade->HasSpellCastItem() && !castItem))
{
delete my_spell;
his_trade->SetSpell(0);
clearAcceptTradeMode(my_trade, his_trade);
clearAcceptTradeMode(myItems, hisItems);
return;
}
his_spell = new Spell(trader, spellEntry, TRIGGERED_FULL_MASK);
his_spell->m_CastItem = castItem;
his_targets.SetTradeItemTarget(trader);
his_spell->m_targets = his_targets;
SpellCastResult res = his_spell->CheckCast(true);
if (res != SPELL_CAST_OK)
{
his_spell->SendCastResult(res);
clearAcceptTradeMode(my_trade, his_trade);
clearAcceptTradeMode(myItems, hisItems);
delete my_spell;
delete his_spell;
his_trade->SetSpell(0);
return;
}
}
// inform partner client
info.Status = TRADE_STATUS_ACCEPTED;
trader->GetSession()->SendTradeStatus(info);
// test if item will fit in each inventory
WorldPackets::Trade::TradeStatus myCanCompleteInfo, hisCanCompleteInfo;
hisCanCompleteInfo.BagResult = trader->CanStoreItems(myItems, TRADE_SLOT_TRADED_COUNT, &hisCanCompleteInfo.ItemID);
myCanCompleteInfo.BagResult = _player->CanStoreItems(hisItems, TRADE_SLOT_TRADED_COUNT, &myCanCompleteInfo.ItemID);
clearAcceptTradeMode(myItems, hisItems);
// in case of missing space report error
if (myCanCompleteInfo.BagResult != EQUIP_ERR_OK)
{
clearAcceptTradeMode(my_trade, his_trade);
myCanCompleteInfo.Status = TRADE_STATUS_FAILED;
trader->GetSession()->SendTradeStatus(myCanCompleteInfo);
myCanCompleteInfo.FailureForYou = true;
SendTradeStatus(myCanCompleteInfo);
my_trade->SetAccepted(false);
his_trade->SetAccepted(false);
delete my_spell;
delete his_spell;
return;
}
else if (hisCanCompleteInfo.BagResult != EQUIP_ERR_OK)
{
clearAcceptTradeMode(my_trade, his_trade);
hisCanCompleteInfo.Status = TRADE_STATUS_FAILED;
SendTradeStatus(hisCanCompleteInfo);
hisCanCompleteInfo.FailureForYou = true;
trader->GetSession()->SendTradeStatus(hisCanCompleteInfo);
my_trade->SetAccepted(false);
his_trade->SetAccepted(false);
delete my_spell;
delete his_spell;
return;
}
// execute trade: 1. remove
for (uint8 i = 0; i < TRADE_SLOT_TRADED_COUNT; ++i)
{
if (myItems[i])
{
myItems[i]->SetGuidValue(ITEM_FIELD_GIFTCREATOR, _player->GetGUID());
_player->MoveItemFromInventory(myItems[i]->GetBagSlot(), myItems[i]->GetSlot(), true);
}
if (hisItems[i])
{
hisItems[i]->SetGuidValue(ITEM_FIELD_GIFTCREATOR, trader->GetGUID());
trader->MoveItemFromInventory(hisItems[i]->GetBagSlot(), hisItems[i]->GetSlot(), true);
}
}
// execute trade: 2. store
moveItems(myItems, hisItems);
// logging money
if (HasPermission(rbac::RBAC_PERM_LOG_GM_TRADE))
{
if (my_trade->GetMoney() > 0)
{
sLog->outCommand(_player->GetSession()->GetAccountId(), "GM %s (Account: %u) give money (Amount: " UI64FMTD ") to player: %s (Account: %u)",
_player->GetName().c_str(), _player->GetSession()->GetAccountId(),
my_trade->GetMoney(),
trader->GetName().c_str(), trader->GetSession()->GetAccountId());
}
if (his_trade->GetMoney() > 0)
{
sLog->outCommand(trader->GetSession()->GetAccountId(), "GM %s (Account: %u) give money (Amount: " UI64FMTD ") to player: %s (Account: %u)",
trader->GetName().c_str(), trader->GetSession()->GetAccountId(),
his_trade->GetMoney(),
_player->GetName().c_str(), _player->GetSession()->GetAccountId());
}
}
// update money
_player->ModifyMoney(-int64(my_trade->GetMoney()));
_player->ModifyMoney(his_trade->GetMoney());
trader->ModifyMoney(-int64(his_trade->GetMoney()));
trader->ModifyMoney(my_trade->GetMoney());
// Fire the trade-slot spells now that the items have changed hands.
if (my_spell)
my_spell->prepare(&my_targets);
if (his_spell)
his_spell->prepare(&his_targets);
// cleanup
clearAcceptTradeMode(my_trade, his_trade);
delete _player->m_trade;
_player->m_trade = NULL;
delete trader->m_trade;
trader->m_trade = NULL;
// desynchronized with the other saves here (SaveInventoryAndGoldToDB() not have own transaction guards)
SQLTransaction trans = CharacterDatabase.BeginTransaction();
_player->SaveInventoryAndGoldToDB(trans);
trader->SaveInventoryAndGoldToDB(trans);
CharacterDatabase.CommitTransaction(trans);
info.Status = TRADE_STATUS_COMPLETE;
trader->GetSession()->SendTradeStatus(info);
SendTradeStatus(info);
}
else
{
// Partner has not accepted yet: just tell them we did.
info.Status = TRADE_STATUS_ACCEPTED;
trader->GetSession()->SendTradeStatus(info);
}
}
// Client withdrew its acceptance; clear the flag and notify the partner.
void WorldSession::HandleUnacceptTradeOpcode(WorldPackets::Trade::UnacceptTrade& /*unacceptTrade*/)
{
    if (TradeData* trade = _player->GetTradeData())
        trade->SetAccepted(false, true);
}
// The invited player agreed to trade: tell both clients to open the window.
void WorldSession::HandleBeginTradeOpcode(WorldPackets::Trade::BeginTrade& /*beginTrade*/)
{
    TradeData* trade = _player->m_trade;
    if (!trade)
        return;

    WorldPackets::Trade::TradeStatus status;
    status.Status = TRADE_STATUS_INITIATED;
    trade->GetTrader()->GetSession()->SendTradeStatus(status);
    SendTradeStatus(status);
}
void WorldSession::SendCancelTrade()
{
if (PlayerRecentlyLoggedOut() || PlayerLogout())
return;
WorldPackets::Trade::TradeStatus info;
info.Status = TRADE_STATUS_CANCELLED;
SendTradeStatus(info);
}
void WorldSession::HandleCancelTradeOpcode(WorldPackets::Trade::CancelTrade& /*cancelTrade*/)
{
    // This opcode can also arrive after LOGOUT COMPLETE, in which case
    // _player is already null (STATUS_LOGGEDIN_OR_RECENTLY_LOGGOUT).
    if (!_player)
        return;

    _player->TradeCancel(true);
}
/*
 * Player proposed a trade to the target in initiateTrade.Guid.
 *
 * Runs a chain of eligibility checks (state of both players, level
 * requirement, faction, ignore list, distance); the FIRST failing check
 * decides which status/notification the initiating client receives, so the
 * order of these guards is part of the observable behavior. On success a
 * TradeData pair is created and the target is sent the proposal.
 */
void WorldSession::HandleInitiateTradeOpcode(WorldPackets::Trade::InitiateTrade& initiateTrade)
{
// Already trading: silently ignore the request.
if (GetPlayer()->m_trade)
return;
WorldPackets::Trade::TradeStatus info;
// --- checks on the initiating player ---
if (!GetPlayer()->IsAlive())
{
info.Status = TRADE_STATUS_DEAD;
SendTradeStatus(info);
return;
}
if (GetPlayer()->HasUnitState(UNIT_STATE_STUNNED))
{
info.Status = TRADE_STATUS_STUNNED;
SendTradeStatus(info);
return;
}
if (isLogingOut())
{
info.Status = TRADE_STATUS_LOGGING_OUT;
SendTradeStatus(info);
return;
}
if (GetPlayer()->IsInFlight())
{
info.Status = TRADE_STATUS_TOO_FAR_AWAY;
SendTradeStatus(info);
return;
}
if (GetPlayer()->getLevel() < sWorld->getIntConfig(CONFIG_TRADE_LEVEL_REQ))
{
SendNotification(GetTrinityString(LANG_TRADE_REQ), sWorld->getIntConfig(CONFIG_TRADE_LEVEL_REQ));
return;
}
// --- checks on the target player ---
Player* pOther = ObjectAccessor::FindPlayer(initiateTrade.Guid);
if (!pOther)
{
info.Status = TRADE_STATUS_NO_TARGET;
SendTradeStatus(info);
return;
}
if (pOther == GetPlayer() || pOther->m_trade)
{
info.Status = TRADE_STATUS_PLAYER_BUSY;
SendTradeStatus(info);
return;
}
if (!pOther->IsAlive())
{
info.Status = TRADE_STATUS_TARGET_DEAD;
SendTradeStatus(info);
return;
}
if (pOther->IsInFlight())
{
info.Status = TRADE_STATUS_TOO_FAR_AWAY;
SendTradeStatus(info);
return;
}
if (pOther->HasUnitState(UNIT_STATE_STUNNED))
{
info.Status = TRADE_STATUS_TARGET_STUNNED;
SendTradeStatus(info);
return;
}
if (pOther->GetSession()->isLogingOut())
{
info.Status = TRADE_STATUS_TARGET_LOGGING_OUT;
SendTradeStatus(info);
return;
}
if (pOther->GetSocial()->HasIgnore(GetPlayer()->GetGUID()))
{
info.Status = TRADE_STATUS_PLAYER_IGNORED;
SendTradeStatus(info);
return;
}
// Cross-faction trade (including mercenary-mode players) is only allowed
// when the config option or the RBAC permission explicitly permits it.
if ((pOther->GetTeam() != _player->GetTeam() ||
pOther->HasFlag(PLAYER_FLAGS_EX, PLAYER_FLAGS_EX_MERCENARY_MODE) ||
_player->HasFlag(PLAYER_FLAGS_EX, PLAYER_FLAGS_EX_MERCENARY_MODE)) &&
(!sWorld->getBoolConfig(CONFIG_ALLOW_TWO_SIDE_TRADE) &&
!HasPermission(rbac::RBAC_PERM_ALLOW_TWO_SIDE_TRADE)))
{
info.Status = TRADE_STATUS_WRONG_FACTION;
SendTradeStatus(info);
return;
}
if (!pOther->IsWithinDistInMap(_player, TRADE_DISTANCE, false))
{
info.Status = TRADE_STATUS_TOO_FAR_AWAY;
SendTradeStatus(info);
return;
}
if (pOther->getLevel() < sWorld->getIntConfig(CONFIG_TRADE_LEVEL_REQ))
{
SendNotification(GetTrinityString(LANG_TRADE_OTHER_REQ), sWorld->getIntConfig(CONFIG_TRADE_LEVEL_REQ));
return;
}
// OK start trade
_player->m_trade = new TradeData(_player, pOther);
pOther->m_trade = new TradeData(pOther, _player);
info.Status = TRADE_STATUS_PROPOSED;
info.Partner = _player->GetGUID();
pOther->GetSession()->SendTradeStatus(info);
}
// Client changed the gold amount offered in the trade window.
void WorldSession::HandleSetTradeGoldOpcode(WorldPackets::Trade::SetTradeGold& setTradeGold)
{
    TradeData* trade = _player->GetTradeData();
    if (!trade)
        return;

    // Any modification bumps the state index so a pending accept is voided.
    trade->UpdateClientStateIndex();
    trade->SetMoney(setTradeGold.Coinage);
}
// Client placed an item into a trade slot. Validates the slot and item
// (several of these failures are only reachable through a hacked client)
// before committing it to the TradeData.
void WorldSession::HandleSetTradeItemOpcode(WorldPackets::Trade::SetTradeItem& setTradeItem)
{
    TradeData* trade = _player->GetTradeData();
    if (!trade)
        return;

    WorldPackets::Trade::TradeStatus status;

    // Slot index out of range: impossible with a sane client.
    if (setTradeItem.TradeSlot >= TRADE_SLOT_COUNT)
    {
        status.Status = TRADE_STATUS_CANCELLED;
        SendTradeStatus(status);
        return;
    }

    // The referenced item must exist and, for traded slots, be tradable.
    Item* item = _player->GetItemByPos(setTradeItem.PackSlot, setTradeItem.ItemSlotInPack);
    if (!item || (setTradeItem.TradeSlot != TRADE_SLOT_NONTRADED && !item->CanBeTraded(false, true)))
    {
        status.Status = TRADE_STATUS_CANCELLED;
        SendTradeStatus(status);
        return;
    }

    // The same item must not occupy more than one trade slot (cheating /
    // client bug protection).
    ObjectGuid itemGuid = item->GetGUID();
    if (trade->HasItem(itemGuid))
    {
        status.Status = TRADE_STATUS_CANCELLED;
        SendTradeStatus(status);
        return;
    }

    // Void any pending accept on the partner's side before changing slots.
    trade->UpdateClientStateIndex();

    // Bound items may only be offered to eligible partners.
    if (setTradeItem.TradeSlot != TRADE_SLOT_NONTRADED && item->IsBindedNotWith(trade->GetTrader()))
    {
        status.Status = TRADE_STATUS_NOT_ON_TAPLIST;
        status.TradeSlot = setTradeItem.TradeSlot;
        SendTradeStatus(status);
        return;
    }

    trade->SetItem(TradeSlots(setTradeItem.TradeSlot), item);
}
// Client removed an item from a trade slot.
void WorldSession::HandleClearTradeItemOpcode(WorldPackets::Trade::ClearTradeItem& clearTradeItem)
{
    TradeData* trade = _player->m_trade;
    if (!trade)
        return;

    // Partner must re-accept after any modification.
    trade->UpdateClientStateIndex();

    // Ignore invalid slot numbers (cannot happen with a sane client).
    if (clearTradeItem.TradeSlot >= TRADE_SLOT_COUNT)
        return;

    trade->SetItem(TradeSlots(clearTradeItem.TradeSlot), NULL);
}
// Intentionally a no-op: trading currencies is not implemented; the packet
// is consumed so the client is not disconnected for an unhandled opcode.
void WorldSession::HandleSetTradeCurrencyOpcode(WorldPackets::Trade::SetTradeCurrency& /*setTradeCurrency*/)
{
}
| gpl-2.0 |
hellsgod/hells-Core-N5 | fs/btrfs/inode.c | 12 | 207057 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
/* Lookup key bundled for the inode-cache lookup helpers in this file. */
struct btrfs_iget_args {
u64 ino; /* inode number (objectid) to look up */
struct btrfs_root *root; /* subvolume root the inode belongs to */
};
/* Forward declarations of the operation tables defined later in this file. */
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
/* Slab caches; the non-static ones are shared with other btrfs objects. */
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
/* Map the S_IFMT bits of i_mode to btrfs directory-entry type codes. */
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
[S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
[S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
[S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
/* Forward declarations of helpers defined below. */
static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode);
/*
 * Initialize the security metadata of a newly created inode: inherit
 * POSIX ACLs from the parent directory, then set up the security xattrs.
 * Returns 0 on success or the first error encountered.
 */
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int ret;

	ret = btrfs_init_acl(trans, inode, dir);
	if (ret)
		return ret;

	return btrfs_xattr_security_init(trans, inode, dir, qstr);
}
/*
 * Insert an inline extent (file data stored directly in the btree leaf)
 * for [start, start + size) of @inode.
 *
 * The caller must have already run btrfs_drop_extents() so that no
 * overlapping inline items exist in the btree. If @compressed_size is
 * non-zero and @compressed_pages is provided, the compressed bytes are
 * written as the item payload; otherwise the plain page cache data is
 * copied. Returns 0 on success or a negative errno.
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, size_t size, size_t compressed_size,
int compress_type,
struct page **compressed_pages)
{
struct btrfs_key key;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct page *page = NULL;
char *kaddr;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
int err = 0;
int ret;
size_t cur_size = size;
size_t datasize;
unsigned long offset;
/* the on-disk payload is the compressed bytes when compression is used */
if (compressed_size && compressed_pages)
cur_size = compressed_size;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
/* item key: (inode number, EXTENT_DATA, file offset) */
key.objectid = btrfs_ino(inode);
key.offset = start;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(cur_size);
inode_add_bytes(inode, size);
ret = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (ret) {
err = ret;
goto fail;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, size);
ptr = btrfs_file_extent_inline_start(ei);
if (compress_type != BTRFS_COMPRESS_NONE) {
/* copy the compressed pages into the leaf, page by page */
struct page *cpage;
int i = 0;
while (compressed_size > 0) {
cpage = compressed_pages[i];
cur_size = min_t(unsigned long, compressed_size,
PAGE_CACHE_SIZE);
kaddr = kmap_atomic(cpage);
write_extent_buffer(leaf, kaddr, ptr, cur_size);
kunmap_atomic(kaddr);
i++;
ptr += cur_size;
compressed_size -= cur_size;
}
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
/* uncompressed: copy directly from the page cache page */
page = find_get_page(inode->i_mapping,
start >> PAGE_CACHE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_atomic(page);
offset = start & (PAGE_CACHE_SIZE - 1);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
kunmap_atomic(kaddr);
page_cache_release(page);
}
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
/*
* we're an inline extent, so nobody can
* extend the file past i_size without locking
* a page we already have locked.
*
* We must do any isize and inode updates
* before we unlock the pages. Otherwise we
* could end up racing with unlink.
*/
BTRFS_I(inode)->disk_i_size = inode->i_size;
ret = btrfs_update_inode(trans, root, inode);
return ret;
fail:
btrfs_free_path(path);
return err;
}
/*
 * conditionally insert an inline extent into the file. This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 *
 * Returns 1 when the range does not qualify for inlining (caller falls
 * back to regular extents), 0 on success, or a negative errno.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 start, u64 end,
size_t compressed_size, int compress_type,
struct page **compressed_pages)
{
u64 isize = i_size_read(inode);
u64 actual_end = min(end + 1, isize);
u64 inline_len = actual_end - start;
/* round the end of the range up to a sector boundary */
u64 aligned_end = (end + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
u64 hint_byte;
u64 data_len = inline_len;
int ret;
if (compressed_size)
data_len = compressed_size;
/* bail out (return 1) if the range cannot be stored inline */
if (start > 0 ||
actual_end >= PAGE_CACHE_SIZE ||
data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
(!compressed_size &&
(actual_end & (root->sectorsize - 1)) == 0) ||
end + 1 < isize ||
data_len > root->fs_info->max_inline) {
return 1;
}
ret = btrfs_drop_extents(trans, inode, start, aligned_end,
&hint_byte, 1);
if (ret)
return ret;
/*
* NOTE(review): when isize > actual_end, actual_end == end + 1, so this
* min() always yields actual_end and sets inline_len = end + 1 rather
* than a length relative to start — looks suspicious; verify against
* upstream before changing.
*/
if (isize > actual_end)
inline_len = min_t(u64, isize, actual_end);
ret = insert_inline_extent(trans, root, inode, start,
inline_len, compressed_size,
compress_type, compressed_pages);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
return ret;
}
/* the delalloc metadata reservation for this range is no longer needed */
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
return 0;
}
/* One extent produced by the compression phase, queued for submission. */
struct async_extent {
u64 start; /* file offset the extent starts at */
u64 ram_size; /* uncompressed length in bytes */
u64 compressed_size; /* compressed length; 0 if not compressed */
struct page **pages; /* pages holding compressed data, or NULL */
unsigned long nr_pages; /* number of entries in pages[] */
int compress_type; /* BTRFS_COMPRESS_* used for this extent */
struct list_head list; /* linkage on async_cow->extents */
};
/* Work unit covering one delalloc range being compressed/written back. */
struct async_cow {
struct inode *inode; /* inode the range belongs to */
struct btrfs_root *root; /* root of that inode */
struct page *locked_page; /* page the caller already holds locked */
u64 start; /* first byte of the range */
u64 end; /* last byte of the range (inclusive) */
struct list_head extents; /* async_extent list built by compression */
struct btrfs_work work; /* worker-thread handle */
};
/*
 * Queue one extent (compressed or not) produced by the compression phase
 * onto the async_cow work list for the submission phase to pick up.
 * Allocation failure is treated as fatal (BUG_ON); always returns 0.
 */
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *extent;

	extent = kmalloc(sizeof(*extent), GFP_NOFS);
	BUG_ON(!extent); /* -ENOMEM */

	extent->start = start;
	extent->ram_size = ram_size;
	extent->compressed_size = compressed_size;
	extent->pages = pages;
	extent->nr_pages = nr_pages;
	extent->compress_type = compress_type;
	list_add_tail(&extent->list, &cow->extents);

	return 0;
}
/*
 * we create compressed extents in two phases. The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus. The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes. This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end,
struct async_cow *async_cow,
int *num_added)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
u64 num_bytes;
u64 blocksize = root->sectorsize;
u64 actual_end;
u64 isize = i_size_read(inode);
int ret = 0;
struct page **pages = NULL;
unsigned long nr_pages;
unsigned long nr_pages_ret = 0;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
/* 128k cap on both sides of one compressed extent (see comment below) */
unsigned long max_compressed = 128 * 1024;
unsigned long max_uncompressed = 128 * 1024;
int i;
int will_compress;
int compress_type = root->fs_info->compress_type;
/* if this is a small write inside eof, kick off a defrag */
if ((end - start + 1) < 16 * 1024 &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
btrfs_add_inode_defrag(NULL, inode);
actual_end = min_t(u64, isize, end + 1);
again:
will_compress = 0;
nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
/*
* we don't want to send crud past the end of i_size through
* compression, that's just a waste of CPU time. So, if the
* end of the file is before the start of our current
* requested range of bytes, we bail out to the uncompressed
* cleanup code that can deal with all of this.
*
* It isn't really the fastest way to fix things, but this is a
* very uncommon corner.
*/
if (actual_end <= start)
goto cleanup_and_bail_uncompressed;
total_compressed = actual_end - start;
/* we want to make sure that amount of ram required to uncompress
* an extent is reasonable, so we limit the total size in ram
* of a compressed extent to 128k. This is a crucial number
* because it also controls how easily we can spread reads across
* cpus for decompression.
*
* We also want to make sure the amount of IO required to do
* a random read is reasonably small, so we limit the size of
* a compressed extent to 128k.
*/
total_compressed = min(total_compressed, max_uncompressed);
/* round the range length up to block size */
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
num_bytes = max(blocksize, num_bytes);
total_in = 0;
ret = 0;
/*
* we do compression for mount -o compress and when the
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
(btrfs_test_opt(root, COMPRESS) ||
(BTRFS_I(inode)->force_compress) ||
(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
WARN_ON(pages);
pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
goto cont;
}
if (BTRFS_I(inode)->force_compress)
compress_type = BTRFS_I(inode)->force_compress;
ret = btrfs_compress_pages(compress_type,
inode->i_mapping, start,
total_compressed, pages,
nr_pages, &nr_pages_ret,
&total_in,
&total_compressed,
max_compressed);
if (!ret) {
unsigned long offset = total_compressed &
(PAGE_CACHE_SIZE - 1);
struct page *page = pages[nr_pages_ret - 1];
char *kaddr;
/* zero the tail end of the last page, we might be
* sending it down to disk
*/
if (offset) {
kaddr = kmap_atomic(page);
memset(kaddr + offset, 0,
PAGE_CACHE_SIZE - offset);
kunmap_atomic(kaddr);
}
will_compress = 1;
}
}
cont:
/* a range starting at offset 0 may qualify for an inline extent */
if (start == 0) {
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto cleanup_and_out;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
/* lets try to make an inline extent */
if (ret || total_in < (actual_end - start)) {
/* we didn't compress the entire range, try
* to make an uncompressed inline extent.
*/
ret = cow_file_range_inline(trans, root, inode,
start, end, 0, 0, NULL);
} else {
/* try making a compressed inline extent */
ret = cow_file_range_inline(trans, root, inode,
start, end,
total_compressed,
compress_type, pages);
}
if (ret <= 0) {
/*
* inline extent creation worked or returned error,
* we don't need to create any more async work items.
* Unlock and free up our temp pages.
*/
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
EXTENT_CLEAR_DELALLOC |
EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
btrfs_end_transaction(trans, root);
goto free_pages_out;
}
btrfs_end_transaction(trans, root);
}
if (will_compress) {
/*
* we aren't doing an inline extent round the compressed size
* up to a block size boundary so the allocator does sane
* things
*/
total_compressed = (total_compressed + blocksize - 1) &
~(blocksize - 1);
/*
* one last check to make sure the compression is really a
* win, compare the page count read with the blocks on disk
*/
total_in = (total_in + PAGE_CACHE_SIZE - 1) &
~(PAGE_CACHE_SIZE - 1);
if (total_compressed >= total_in) {
will_compress = 0;
} else {
num_bytes = total_in;
}
}
if (!will_compress && pages) {
/*
* the compression code ran but failed to make things smaller,
* free any pages it allocated and our page pointer array
*/
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
pages = NULL;
total_compressed = 0;
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
!(BTRFS_I(inode)->force_compress)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
}
if (will_compress) {
*num_added += 1;
/* the async work queues will take care of doing actual
* allocation on disk for these compressed pages,
* and will submit them to the elevator.
*/
add_async_extent(async_cow, start, num_bytes,
total_compressed, pages, nr_pages_ret,
compress_type);
/* more data left in the range: loop to compress the next chunk */
if (start + num_bytes < end) {
start += num_bytes;
pages = NULL;
cond_resched();
goto again;
}
} else {
cleanup_and_bail_uncompressed:
/*
* No compression, but we still need to write the pages in
* the file we've been given so far. redirty the locked
* page if it corresponds to our extent and set things up
* for the async work queue to run cow_file_range to do
* the normal delalloc dance
*/
if (page_offset(locked_page) >= start &&
page_offset(locked_page) <= end) {
__set_page_dirty_nobuffers(locked_page);
/* unlocked later on in the async handlers */
}
add_async_extent(async_cow, start, end - start + 1,
0, NULL, 0, BTRFS_COMPRESS_NONE);
*num_added += 1;
}
out:
return ret;
free_pages_out:
for (i = 0; i < nr_pages_ret; i++) {
WARN_ON(pages[i]->mapping);
page_cache_release(pages[i]);
}
kfree(pages);
goto out;
cleanup_and_out:
/* transaction join failed: unlock the range and report the error */
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
start, end, NULL,
EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_DIRTY |
EXTENT_CLEAR_DELALLOC |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
if (!trans || IS_ERR(trans))
btrfs_error(root->fs_info, ret, "Failed to join transaction");
else
btrfs_abort_transaction(trans, root, ret);
goto free_pages_out;
}
/*
* phase two of compressed writeback. This is the ordered portion
* of the code, which only gets called in the order the work was
* queued. We walk all the async extents created by compress_file_range
* and send them down to the disk.
*/
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

	/* drain every async_extent queued by the compression phase */
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us. Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		/* reserve disk space sized by the compressed length */
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			/*
			 * reservation failed: drop the compressed pages so the
			 * !pages branch above can retry this range uncompressed
			 * on -ENOSPC
			 */
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1);
			if (ret == -ENOSPC)
				goto retry;
			goto out_free; /* JDM: Requeue? */
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		/* insert the map, dropping stale overlapping ones on -EEXIST */
		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret); /* -ENOMEM */

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret); /* -ENOMEM */
		/* allocate the next extent near this one if possible */
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free:
	kfree(async_extent);
	goto out;
}
/*
 * pick a disk block number to use as an allocation hint for the range
 * [start, start + num_bytes): prefer the cached mapping covering the range,
 * falling back to the inode's first real extent; 0 if neither is usable.
 */
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *map;
	u64 hint = 0;

	read_lock(&tree->lock);
	map = search_extent_mapping(tree, start, num_bytes);
	if (map) {
		if (map->block_start < EXTENT_MAP_LAST_BYTE) {
			/* a real on-disk extent: use its block number */
			hint = map->block_start;
			free_extent_map(map);
		} else {
			/*
			 * block start isn't an actual block number: try the
			 * first extent in the inode instead, and ignore it
			 * too if it is equally bogus
			 */
			free_extent_map(map);
			map = search_extent_mapping(tree, 0, 0);
			if (map) {
				if (map->block_start < EXTENT_MAP_LAST_BYTE)
					hint = map->block_start;
				free_extent_map(map);
			}
		}
	}
	read_unlock(&tree->lock);

	return hint;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code. The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it. It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	/* free space inodes are expected to go through run_delalloc_nocow */
	BUG_ON(btrfs_is_free_space_inode(root, inode));
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		/* no transaction: unwind delalloc state for the whole range */
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	/* round the byte count up to a whole number of blocks */
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			/*
			 * the inline extent holds all the data; no block
			 * allocation or IO is needed, just unlock everything
			 */
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/*
	 * the allocator may satisfy the request with a smaller extent
	 * than asked for, so loop until the whole range is on disk
	 */
	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		/* record the new allocation in the extent map cache */
		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		/* insert, dropping any stale overlapping map on -EEXIST */
		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);

	return ret;
out_unlock:
	/* error path: release delalloc state and end writeback on the range */
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, NULL,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}
/*
 * work queue callback to start compression on a file range.  If the
 * compression phase produced no async extents, clear async_cow->inode so
 * the ordered submit callback knows there is nothing to write.
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *cow = container_of(work, struct async_cow, work);
	int extents_added = 0;

	compress_file_range(cow->inode, cow->locked_page, cow->start,
			    cow->end, cow, &extents_added);
	if (extents_added == 0)
		cow->inode = NULL;
}
/*
 * work queue callback to submit previously compressed pages.  Runs in
 * queue order (ordered_func): drops this chunk's pages from the fs-wide
 * in-flight count, wakes throttled submitters once the count falls below
 * the 5MiB threshold, and pushes the compressed extents down to disk.
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	/*
	 * wake-up threshold is 5MiB; this was "5 * 1042 * 1024", an
	 * obvious typo for 5 * 1024 * 1024
	 */
	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	/* inode is NULLed by async_cow_start when nothing was queued */
	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
/* work queue callback to free an async_cow once its work item is done */
static noinline void async_cow_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct async_cow, work));
}
/*
 * Queue the delalloc range [start, end] for asynchronous compression/COW:
 * carve it into chunks (512K when compression may apply), queue one
 * async_cow work item per chunk on the delalloc workers, and throttle the
 * submitter when too many pages are already in flight.
 *
 * Always reports success: *page_started is set and *nr_written is bumped
 * by the number of pages handed to the workers.
 */
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	/*
	 * back-pressure threshold of 10MiB; this was "10 * 1024 * 1042",
	 * an obvious typo for 10 * 1024 * 1024
	 */
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		/* chunk compressible inodes; nocompress takes the whole range */
		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		/* too much queued already: wait for the workers to catch up */
		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		/* a drain is in progress: wait until everything is flushed */
		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
/*
 * return 1 if any checksum items exist for the disk range
 * [bytenr, bytenr + num_bytes), 0 if the lookup succeeded and found none.
 * Any sums the lookup handed back are freed before returning.
 */
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_ordered_sum *next;
	LIST_HEAD(list);
	int ret;

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	/* lookup failed or found sums: release whatever came back */
	list_for_each_entry_safe(sum, next, &list, list) {
		list_del(&sum->list);
		kfree(sum);
	}
	return 1;
}
/*
 * nocow writeback callback. This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;		/* start of a pending range that must be COWed */
	u64 cur_offset;		/* file offset the scan has reached */
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* free space inodes use the nolock flavour of transaction join */
	nolock = btrfs_is_free_space_inode(root, inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		/*
		 * on the first pass the lookup may land just past cur_offset;
		 * the previous item could be the extent covering it, so step
		 * back one slot if that item is this inode's extent data
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* past this inode's extent items or past the range: done */
		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/* a hole before the next extent always has to be COWed */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			/* a hole extent record has no blocks to reuse */
			if (disk_bytenr == 0)
				goto out_check;
			/* encoded extents can't be rewritten in place */
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			/* only force nocow of REG extents when asked to */
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			/* shared with another tree/snapshot: must COW */
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			/* grow the contiguous range that will be COWed */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);

		/* flush any accumulated COW range before the nocow extent */
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1, page_started,
					     nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			/* cache a pinned mapping for the preallocated blocks */
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	/* COW whatever tail of the range is left over */
	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	if (nolock) {
		err = btrfs_end_transaction_nolock(trans, root);
	} else {
		err = btrfs_end_transaction(trans, root);
	}
	/* don't let a clean-up error mask success, but do report it */
	if (!ret)
		ret = err;

	btrfs_free_path(path);
	return ret;
}
/*
 * extent_io.c callback for delayed allocation processing: dispatch the
 * range to the nocow, plain COW, or async-compressed COW path depending
 * on the inode flags and mount options.
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* nodatacow: try hard to reuse existing extents (force = 1) */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		return run_delalloc_nocow(inode, locked_page, start, end,
					  page_started, 1, nr_written);

	/* prealloc: fill in preallocated extents only (force = 0) */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		return run_delalloc_nocow(inode, locked_page, start, end,
					  page_started, 0, nr_written);

	/* no compression requested anywhere: plain synchronous COW */
	if (!btrfs_test_opt(root, COMPRESS) &&
	    !(BTRFS_I(inode)->force_compress) &&
	    !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
		return cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);

	/* compression may apply: hand the range to the async workers */
	return cow_file_range_async(inode, locked_page, start, end,
				    page_started, nr_written);
}
/*
 * extent_io.c split hook: a delalloc extent being split means one more
 * outstanding extent to account for on the inode.
 */
static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	struct btrfs_inode *binode;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	binode = BTRFS_I(inode);
	spin_lock(&binode->lock);
	binode->outstanding_extents++;
	spin_unlock(&binode->lock);
}
/*
* extent_io.c merge_extent_hook, used to track merged delayed allocation
* extents so we can keep track of new extents that are just merged onto old
* extents, such as when we are doing sequential writes, so we can properly
* account for the metadata space we'll need.
*/
static void btrfs_merge_extent_hook(struct inode *inode,
struct extent_state *new,
struct extent_state *other)
{
/* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC))
return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* free space inodes stay off the fs-wide delalloc list */
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		/*
		 * EXTENT_FIRST_DELALLOC suppresses the outstanding_extents
		 * bump for the first range; consume the flag here
		 */
		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/* account the bytes on the inode and fs-wide counters */
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* free space inodes were never put on the delalloc list */
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		/*
		 * FIRST_DELALLOC mirrors set_bit_hook; with DO_ACCOUNTING
		 * the extent count is settled below via metadata release
		 */
		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		/* relocation handles its own data space reservations */
		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}
/*
 * extent_io.c merge_bio_hook: consult the chunk tree so we never build a
 * bio that would span a stripe or chunk boundary.  Returns 1 when the page
 * must NOT be added to the bio, 0 when merging is fine.
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length;
	u64 map_length;
	int ret;

	/* compressed bios are assembled elsewhere; never merged here */
	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 or 1 with map_multi == NULL */
	BUG_ON(ret < 0);

	/* can't merge if the mapped run is too short for bio + new page */
	return map_length < length + size;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time. All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	err = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(err); /* -ENOMEM */
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time. All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	/* sums were attached by __btrfs_submit_bio_start; just map the bio */
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}
/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	/* free space inodes get their own end_io workqueue class */
	if (btrfs_is_free_space_inode(root, inode))
		metadata = 2;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
	if (ret)
		return ret;

	if (!(rw & REQ_WRITE)) {
		/* read path: prime csums (or route to decompression) first */
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
/*
 * given a list of ordered sums record them in the csum tree. This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct btrfs_root *csum_root =
		BTRFS_I(inode)->root->fs_info->csum_root;
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list)
		btrfs_csum_file_blocks(trans, csum_root, sum);

	return 0;
}
/*
 * mark [start, end] delalloc in the inode's io_tree.  end is inclusive,
 * so a page-aligned end indicates a caller bug — warn about it.
 */
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;	/* page that was dirtied behind our back */
	struct btrfs_work work;	/* queued on fixup_workers to redo delalloc */
};
/*
 * worker for btrfs_writepage_start_hook: re-establish delalloc accounting
 * for a page that was dirtied without going through the filesystem, then
 * re-dirty it so writepage can handle it normally.
 */
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	/* page was truncated, cleaned, or already handled: nothing to fix */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	/* an ordered extent raced us: wait for it and start over */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		/* no space: fail the page so writeback sees the error */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);	/* drop the ref taken when queued */
	kfree(fixup);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea. This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO. We kick off an async process
 * to fix it up. The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	/* a fixup worker is already queued for this page */
	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/* hold a ref on the page until the worker is done with it */
	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EBUSY;
}
/*
 * insert a file extent item for an extent whose disk space was already
 * reserved, replacing whatever was in the range, and charge the new extent
 * to the extent allocation tree.
 */
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	if (ret)
		goto out;

	/* the file extent item lives at (ino, EXTENT_DATA, file_pos) */
	ins.objectid = btrfs_ino(inode);
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	if (ret)
		goto out;
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	/* now account the extent itself in the allocation tree */
	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					btrfs_ino(inode), file_pos, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction. It limits the amount of btree
 * reads required while inside the transaction.
 *
 * NOTE(review): this comment appears stale — the helper it describes is
 * not present in this file; confirm against the full source.
 */

/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	int compress_type = 0;
	int ret;
	bool nolock;

	/* if this range wasn't the last piece of the ordered extent, bail */
	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					     end - start + 1);
	if (!ret)
		return 0;
	BUG_ON(!ordered_extent); /* Logic error */

	nolock = btrfs_is_free_space_inode(root, inode);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		/* nocow: no file extent item to insert, just update i_size */
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			if (nolock)
				trans = btrfs_join_transaction_nolock(root);
			else
				trans = btrfs_join_transaction(root);
			/*
			 * NOTE(review): returning here skips the two
			 * btrfs_put_ordered_extent() calls at out: —
			 * looks like a reference leak; confirm upstream
			 */
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode_fallback(trans, root, inode);
			if (ret) /* -ENOMEM or corruption */
				btrfs_abort_transaction(trans, root, ret);
		}
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_unlock;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		/* prealloc: the extent item exists; just flip it to written */
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		/* the new extent item is in the btree; allow map merging */
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
	}
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	/* insert the csums collected at bio submission time */
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) { /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
	ret = 0;
out:
	if (root != root->fs_info->tree_root)
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
	if (trans) {
		if (nolock)
			btrfs_end_transaction_nolock(trans, root);
		else
			btrfs_end_transaction(trans, root);
	}

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return 0;
out_unlock:
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
	goto out;
}
/*
 * Writeback completion hook for [start, end] on this page: clear the
 * Private2 marker (set when the range was handed to the ordered code)
 * and finish the ordered extent accounting for the written range.
 */
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
	ClearPagePrivate2(page);
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
* when reads are done, we need to check csums to verify the data is correct
* if there's a match, we allow the bio to finish. If not, the code in
* extent_io.c will try to find good copies for us.
*/
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state, int mirror)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	/* the page was already validated (e.g. by a repair rewrite) */
	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	/* this inode carries no data checksums at all */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto good;

	/*
	 * ranges flagged EXTENT_NODATASUM in the data reloc tree are
	 * exempt from verification; drop the flag and accept the read
	 */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	/* use the csum cached in the extent state when it lines up */
	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr);
good:
	return 0;

zeroit:
	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
		       "private %llu\n",
		       (unsigned long long)btrfs_ino(page->mapping->host),
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	/*
	 * poison the range with non-zero bytes so stale contents are not
	 * exposed; -EIO lets the caller retry from another mirror
	 */
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	/* a stored csum of 0 is treated as "no csum" - TODO confirm */
	if (private == 0)
		return 0;
	return -EIO;
}
/*
 * Queue entry for an inode whose final iput() has been deferred; entries
 * are drained by btrfs_run_delayed_iputs().
 */
struct delayed_iput {
	struct list_head list;
	struct inode *inode;
};
/* JDM: If this is fs-wide, why can't we add a pointer to
* btrfs_inode instead and avoid the allocation? */
/* JDM: If this is fs-wide, why can't we add a pointer to
 * btrfs_inode instead and avoid the allocation? */
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	/* if ours is not the last reference, just drop it right away */
	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	/* __GFP_NOFAIL: the allocation cannot return NULL, no check needed */
	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}
/*
 * Drop the deferred inode references queued by btrfs_add_delayed_iput().
 */
void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	/* cheap exit before taking cleanup_work_sem if nothing is queued */
	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	/* splice the whole queue so iput() runs without the spinlock held */
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}
/* progress of per-root orphan cleanup, see btrfs_orphan_cleanup() */
enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};
/*
* This is called in transaction commit time. If there are no orphan
* files in the subvolume, it removes orphan item and frees block_rsv
* structure.
*/
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;
	int ret;

	/* unlocked fast path; both conditions are re-checked under the lock */
	if (!list_empty(&root->orphan_list) ||
	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	spin_lock(&root->orphan_lock);
	if (!list_empty(&root->orphan_list)) {
		spin_unlock(&root->orphan_lock);
		return;
	}

	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
		spin_unlock(&root->orphan_lock);
		return;
	}

	/* detach the reservation so it can be freed outside the lock */
	block_rsv = root->orphan_block_rsv;
	root->orphan_block_rsv = NULL;
	spin_unlock(&root->orphan_lock);

	/* remove the subvolume-level orphan marker from the tree root */
	if (root->orphan_item_inserted &&
	    btrfs_root_refs(&root->root_item) > 0) {
		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
					    root->root_key.objectid);
		BUG_ON(ret);
		root->orphan_item_inserted = 0;
	}

	if (block_rsv) {
		WARN_ON(block_rsv->size > 0);
		btrfs_free_block_rsv(root, block_rsv);
	}
}
/*
* This creates an orphan entry for the given inode in case something goes
* wrong in the middle of an unlink/truncate.
*
* NOTE: caller of this function should reserve 5 units of metadata for
* this function.
*/
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;

	/* allocate the per-root orphan reservation outside the lock */
	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(root);
		if (!block_rsv)
			return -ENOMEM;
	}

	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		/* someone else installed one while we allocated; drop ours */
		btrfs_free_block_rsv(root, block_rsv);
		block_rsv = NULL;
	}

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
#if 0
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
#endif
		insert = 1;
	}

	/* reserve metadata for the orphan item at most once per inode */
	if (!BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 1;
		reserve = 1;
	}
	spin_unlock(&root->orphan_lock);

	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
	}

	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
		/* -EEXIST is fine: the item may survive a previous crash */
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
		ret = 0;
	}

	/* insert an orphan item to track subvolume contains orphan files */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
					       root->root_key.objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
	}
	return 0;
}
/*
* We have done the truncate/delete so we can go ahead and remove the orphan
* item for this particular inode.
*/
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int delete_item = 0;
	int release_rsv = 0;
	int ret = 0;

	/* decide under the lock what work remains for this inode */
	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		list_del_init(&BTRFS_I(inode)->i_orphan);
		delete_item = 1;
	}

	if (BTRFS_I(inode)->orphan_meta_reserved) {
		BTRFS_I(inode)->orphan_meta_reserved = 0;
		release_rsv = 1;
	}
	spin_unlock(&root->orphan_lock);

	/* trans may be NULL; then only the list/reservation state is undone */
	if (trans && delete_item) {
		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
	}

	if (release_rsv)
		btrfs_orphan_release_metadata(inode);

	return 0;
}
/*
* this cleans up any orphans that may be left on the list from the last use
* of this root.
*/
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	/* only one task may run cleanup, and only once per mount */
	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = -1;

	/* orphan items all live under BTRFS_ORPHAN_OBJECTID; walk backwards */
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing. we store the inode number in the
		 * offset of the orphan item.
		 */

		/* seeing the same offset twice means deletion is not working */
		if (found_key.offset == last_objectid) {
			printk(KERN_ERR "btrfs: Error removing orphan entry, "
			       "stopping orphan cleanup\n");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
		ret = PTR_RET(inode);
		if (ret && ret != -ESTALE)
			goto out;

		if (ret == -ESTALE && root == root->fs_info->tree_root) {
			struct btrfs_root *dead_root;
			struct btrfs_fs_info *fs_info = root->fs_info;
			int is_dead_root = 0;

			/*
			 * this is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a snapshot deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the snapshot
			 * orphan must not get deleted.
			 * find_dead_roots already ran before us, so if this
			 * is a snapshot deletion, we should find the root
			 * in the dead_roots list
			 */
			spin_lock(&fs_info->trans_lock);
			list_for_each_entry(dead_root, &fs_info->dead_roots,
					    root_list) {
				if (dead_root->root_key.objectid ==
				    found_key.objectid) {
					is_dead_root = 1;
					break;
				}
			}
			spin_unlock(&fs_info->trans_lock);
			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}
		/*
		 * Inode is already gone but the orphan item is still there,
		 * kill the orphan item.
		 */
		if (ret == -ESTALE) {
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
			btrfs_end_transaction(trans, root);
			continue;
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->orphan_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->orphan_lock);

		/* if we have links, this was a truncate, lets do that */
		if (inode->i_nlink) {
			if (!S_ISREG(inode->i_mode)) {
				/* only regular files should need truncating */
				WARN_ON(1);
				iput(inode);
				continue;
			}
			nr_truncate++;
			ret = btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
		if (ret)
			goto out;
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
		btrfs_block_rsv_release(root, root->orphan_block_rsv,
					(u64)-1);

	/* join a transaction so commit can clear the root's orphan state */
	if (root->orphan_block_rsv || root->orphan_item_inserted) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans, root);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

out:
	if (ret)
		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
	btrfs_free_path(path);
	return ret;
}
/*
* very simple check to peek ahead in the leaf looking for xattrs. If we
* don't find any xattrs, we know there can't be any acls.
*
* slot is the slot the inode is in, objectid is the objectid of the inode
*/
/*
 * Peek at the keys following the inode item in this leaf to decide
 * whether the inode may have ACLs.  Items for one inode are laid out
 * in key order: inode item, inode backrefs, xattrs, extents.  Seeing
 * an xattr key means an ACL may exist; a different objectid or a key
 * type past the xattr range proves there is none.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode.
 * Returns 1 when ACLs may be present, 0 when they definitely are not.
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key key;
	int checked;

	/*
	 * A ton of hard links means a ton of backrefs before the xattrs;
	 * don't waste time scanning far - this is only an optimization.
	 */
	for (checked = 0; checked < 8; checked++) {
		slot++;
		if (slot >= nritems)
			break;
		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* a different objectid: no xattrs, hence no ACLs */
		if (key.objectid != objectid)
			return 0;

		/* found an xattr, assume an ACL exists */
		if (key.type == BTRFS_XATTR_ITEM_KEY)
			return 1;

		/* past the xattr key space: no ACLs possible */
		if (key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;
	}

	/*
	 * Ran off the leaf or gave up early without ruling ACLs out;
	 * conservatively assume the inode has them.
	 */
	return 1;
}
/*
* read an inode from the btree into the in-memory inode
*/
static void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;

	/* the delayed-inode code may already have the fields in memory */
	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	path = btrfs_alloc_path();
	if (!path)
		goto make_bad;

	path->leave_spinning = 1;
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];

	/* if the delayed code filled everything, only the ACL peek remains */
	if (filled)
		goto cache_acl;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(inode));
	if (!maybe_acls)
		cache_no_acl(inode);

	btrfs_free_path(path);

	/* wire up the per-file-type inode and file operations */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		/* directories in the tree root are read-only */
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}
/*
* given a leaf and an inode, copy the inode fields into the leaf
*/
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	/* ownership, size, mode and link count */
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	/* timestamps */
	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	/* btrfs-specific bookkeeping */
	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	/* the block group field is always written as 0 here */
	btrfs_set_inode_block_group(leaf, item, 0);
}
/*
* copy everything in the in-memory inode into the btree.
*/
/*
 * Copy everything in the in-memory inode into its inode item in the
 * btree, looking the item up with write access in the current
 * transaction.  Returns 0 on success, -ENOMEM if no path could be
 * allocated, -ENOENT if the inode item does not exist, or a negative
 * error from the tree search.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		/* a positive return means the item was not found */
		if (ret > 0)
			ret = -ENOENT;
	} else {
		btrfs_unlock_up_safe(path, 1);
		leaf = path->nodes[0];
		item = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_inode_item);

		fill_inode_item(trans, leaf, item, inode);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_set_inode_last_trans(trans, inode);
		ret = 0;
	}
	btrfs_free_path(path);
	return ret;
}
/*
* copy everything in the in-memory inode into the btree.
*/
/*
 * Copy everything in the in-memory inode into the btree, going through
 * the delayed-items code when that is safe.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int ret;

	/*
	 * Free space inodes can deadlock during commit if routed through
	 * the delayed code, and the data relocation tree must likewise be
	 * updated without delay - both write the inode item directly.
	 */
	if (btrfs_is_free_space_inode(root, inode)
	    || root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return btrfs_update_inode_item(trans, root, inode);

	ret = btrfs_delayed_update_inode(trans, root, inode);
	if (!ret)
		btrfs_set_inode_last_trans(trans, inode);
	return ret;
}
/*
 * Like btrfs_update_inode(), but when the delayed update fails with
 * -ENOSPC fall back to writing the inode item directly into the btree.
 */
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int ret = btrfs_update_inode(trans, root, inode);

	if (ret != -ENOSPC)
		return ret;
	return btrfs_update_inode_item(trans, root, inode);
}
/*
* unlink helper that gets used here in inode.c and in the tree logging
* recovery code. It remove a link in a directory with a given name, and
* also drops the back refs in the inode to the directory
*/
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	/* find and delete the directory entry for (dir_ino, name) */
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/* drop the inode's back reference; this also yields the dir index */
	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %llu parent %llu\n", name_len, name,
		       (unsigned long long)ino, (unsigned long long)dir_ino);
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	/* scrub the name out of the tree log too; -ENOENT is harmless */
	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir_ino);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	/* each name is accounted twice in the dir's i_size */
	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
out:
	return ret;
}
/*
 * Remove the directory entry and back references for (dir, name), then
 * drop the victim's link count and write its updated inode item out.
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret = __btrfs_unlink_inode(trans, root, dir, inode,
				       name, name_len);
	if (ret)
		return ret;
	btrfs_drop_nlink(inode);
	return btrfs_update_inode(trans, root, inode);
}
/*
 * Helper to check whether any block on the given search path may be
 * shared with another tree (e.g. through a snapshot).
 *
 * Returns 1 if a shared block is found or the reference count lookup
 * fails, 0 when every block on the path is provably unshared.
 */
static int check_path_shared(struct btrfs_root *root,
			     struct btrfs_path *path)
{
	struct extent_buffer *eb;
	int level;
	u64 refs = 1;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		int ret;

		if (!path->nodes[level])
			break;
		eb = path->nodes[level];
		if (!btrfs_block_can_be_shared(root, eb))
			continue;
		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
					       &refs, NULL);
		/*
		 * Previously a failed lookup was silently ignored and the
		 * stale refs value trusted.  Treat failure as "shared":
		 * the caller only uses this to decide whether the cheap
		 * ENOSPC unlink fast path is safe, so erring towards
		 * shared merely forces the conservative path.
		 */
		if (ret || refs > 1)
			return 1;
	}
	return 0;
}
/*
* helper to start transaction for unlink and rmdir.
*
* unlink and rmdir are special in btrfs, they do not always free space.
* so in enospc case, we should make sure they will free space before
* allowing them to use the global metadata reservation.
*/
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
						       struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *inode = dentry->d_inode;
	u64 index;
	int check_link = 1;
	int err = -ENOSPC;
	int ret;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode ref in the tree log
	 * 2 for the dir entries in the log
	 * 1 for the inode
	 */
	trans = btrfs_start_transaction(root, 8);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	/* the ENOSPC fallback below does not handle subvolume dir entries */
	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return ERR_PTR(-ENOSPC);

	/* check if there is someone else holds reference */
	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
		return ERR_PTR(-ENOSPC);

	if (atomic_read(&inode->i_count) > 2)
		return ERR_PTR(-ENOSPC);

	/* only one ENOSPC-mode unlink may run at a time, fs-wide */
	if (xchg(&root->fs_info->enospc_unlink, 1))
		return ERR_PTR(-ENOSPC);

	path = btrfs_alloc_path();
	if (!path) {
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(-ENOMEM);
	}

	/* 1 for the orphan item */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		root->fs_info->enospc_unlink = 0;
		return trans;
	}

	path->skip_locking = 1;
	path->search_commit_root = 1;

	/*
	 * All lookups below run against the commit root: if every block
	 * on the paths the unlink will touch is unshared, the unlink can
	 * proceed out of the global reservation without new allocations.
	 */
	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(dir)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(path);

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(inode)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(path);

	if (ret == 0 && S_ISREG(inode->i_mode)) {
		ret = btrfs_lookup_file_extent(trans, root, path,
					       ino, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		BUG_ON(ret == 0); /* Corruption */
		if (check_path_shared(root, path))
			goto out;
		btrfs_release_path(path);
	}

	/* items missing from the commit root: nothing more to verify */
	if (!check_link) {
		err = 0;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	if (di) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		err = 0;
		goto out;
	}
	btrfs_release_path(path);

	ref = btrfs_lookup_inode_ref(trans, root, path,
				dentry->d_name.name, dentry->d_name.len,
				ino, dir_ino, 0);
	if (IS_ERR(ref)) {
		err = PTR_ERR(ref);
		goto out;
	}
	BUG_ON(!ref); /* Logic error */
	if (check_path_shared(root, path))
		goto out;
	index = btrfs_inode_ref_index(path->nodes[0], ref);
	btrfs_release_path(path);

	/*
	 * This is a commit root search, if we can lookup inode item and other
	 * relative items in the commit root, it means the transaction of
	 * dir/file creation has been committed, and the dir index item that we
	 * delay to insert has also been inserted into the commit root. So
	 * we needn't worry about the delayed insertion of the dir index item
	 * here.
	 */
	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	/*
	 * NOTE(review): 'ret' here still holds the result of the earlier
	 * lookups; btrfs_lookup_dir_index_item reports errors via 'di',
	 * so this BUG_ON appears to test stale state - confirm intent.
	 */
	BUG_ON(ret == -ENOENT);
	if (check_path_shared(root, path))
		goto out;

	err = 0;
out:
	btrfs_free_path(path);
	/* Migrate the orphan reservation over */
	if (!err)
		err = btrfs_block_rsv_migrate(trans->block_rsv,
				&root->fs_info->global_block_rsv,
				trans->bytes_reserved);

	if (err) {
		btrfs_end_transaction(trans, root);
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(err);
	}

	trans->block_rsv = &root->fs_info->global_block_rsv;
	return trans;
}
static void __unlink_end_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	/*
	 * If __unlink_start_trans fell back to the global reservation,
	 * give the space back and clear the fs-wide enospc_unlink flag
	 * so the next unlink may take the fallback path again.
	 */
	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
		btrfs_block_rsv_release(root, trans->block_rsv,
					trans->bytes_reserved);
		trans->block_rsv = &root->fs_info->trans_block_rsv;
		BUG_ON(!root->fs_info->enospc_unlink);
		root->fs_info->enospc_unlink = 0;
	}
	btrfs_end_transaction(trans, root);
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* record the unlink in the tree log for fsync consistency */
	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret)
		goto out;

	/* last link gone: track the inode as an orphan until final iput */
	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret)
			goto out;
	}

out:
	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* find and delete the dir item pointing at the subvolume root */
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		if (!di)
			ret = -ENOENT;
		else
			ret = PTR_ERR(di);
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}
	btrfs_release_path(path);

	/* drop the root ref; on success it also returns the dir index */
	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir_ino, &index, name, name_len);
	if (ret < 0) {
		if (ret != -ENOENT) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
		/*
		 * no root ref was found; dig the index out of the dir
		 * index item instead
		 */
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
		index = key.offset;
	}
	btrfs_release_path(path);

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	/* each name is accounted twice in the dir's i_size */
	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	/* refuse non-empty dirs and a subvolume's top-level directory */
	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
		return -ENOTEMPTY;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* the empty placeholder dir of a subvolume takes the subvol path */
	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	/* track as orphan until the dir's inode item is finally removed */
	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto out;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
out:
	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	return err;
}
/*
* this can truncate away extent items, csum items and directory items.
* It starts at a high offset and removes keys until it can't find
* any higher than new_size
*
* csum items that cross the new i_size are truncated to the new size
* as well.
*
* min_type is the minimum key type to truncate down to. If set to 0, this
* will kill all the items on this inode, including the INODE_ITEM_KEY.
*/
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 mask = root->sectorsize - 1;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;		/* number of contiguous slots queued for deletion */
	int pending_del_slot = 0;	/* lowest slot of the queued run */
	int extent_type = -1;
	int ret;
	int err = 0;
	u64 ino = btrfs_ino(inode);

	/* A partial truncate may only remove file extent data items. */
	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = -1;	/* we walk keys backwards, so read ahead backwards */

	if (root->ref_cows || root == root->fs_info->tree_root)
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items. So we shouldn't kill the
	 * delayed items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(inode);

	/* Start the search at the largest possible key for this inode. */
	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

search_again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);

		/* Walked past this inode's items: nothing left to do. */
		if (found_key.objectid != ino)
			break;

		if (found_type < min_type)
			break;

		/* Compute the last byte covered by this item. */
		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			/* Items entirely below new_size end the walk. */
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				/*
				 * The extent straddles new_size: shrink it in
				 * place to the sector-aligned new length.
				 */
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = new_size -
					found_key.offset + root->sectorsize - 1;
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				/* disk_bytenr == 0 marks a hole: no extent ref to drop */
				if (extent_start != 0) {
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;
				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				btrfs_truncate_item(trans, root, path,
						    size, 1);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						ino, extent_offset, 0);
			BUG_ON(ret);
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		/* Flush any queued deletions before leaving this leaf. */
		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			if (root->ref_cows &&
			    BTRFS_I(inode)->location.objectid !=
						BTRFS_FREE_INO_OBJECTID) {
				err = -EAGAIN;
				goto out;
			}
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				if (ret) {
					btrfs_abort_transaction(trans,
								root, ret);
					goto error;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	/* Delete anything still queued before returning. */
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}
error:
	btrfs_free_path(path);
	return err;
}
/*
* taken from block_truncate_page, but does cow as it zeros out
* any bytes left in the last page in the file.
*/
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	int ret = 0;
	u64 page_start;
	u64 page_end;

	/* 'from' is already block aligned: nothing to zero. */
	if ((offset & (blocksize - 1)) == 0)
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

	ret = -ENOMEM;
again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		/* The page may have been truncated while it was unlocked. */
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
	set_page_extent_mapped(page);

	/* Wait out any in-flight ordered IO covering this page, then retry. */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/* Reset state bits so the delalloc accounting below starts clean. */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	ret = 0;
	/* Zero from 'offset' to the end of the page. */
	if (offset != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	/* On failure the delalloc reservation must be handed back. */
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size.
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 mask = root->sectorsize - 1;
	u64 hole_start = (oldsize + mask) & ~mask;	/* first aligned byte of hole */
	u64 block_end = (size + mask) & ~mask;		/* aligned end of new size */
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/* New size still fits in the last allocated block: no hole needed. */
	if (size <= hole_start)
		return 0;

	/* Lock the range, retrying until no ordered extent overlaps it. */
	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				block_end - cur_offset, 0);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			/*
			 * Clear em so the free_extent_map() after the loop
			 * is not handed an ERR_PTR (it only tolerates NULL,
			 * which is what every other exit path leaves here).
			 */
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
		/* Preallocated extents already cover the range; skip them. */
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			u64 hint_byte = 0;
			hole_size = last_byte - cur_offset;

			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				break;
			}

			err = btrfs_drop_extents(trans, inode, cur_offset,
						 cur_offset + hole_size,
						 &hint_byte, 1);
			if (err) {
				btrfs_abort_transaction(trans, root, err);
				btrfs_end_transaction(trans, root);
				break;
			}

			/* Insert a disk_bytenr==0 (hole) file extent item. */
			err = btrfs_insert_file_extent(trans, root,
					btrfs_ino(inode), cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			if (err) {
				btrfs_abort_transaction(trans, root, err);
				btrfs_end_transaction(trans, root);
				break;
			}

			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);

			btrfs_update_inode(trans, root, inode);
			btrfs_end_transaction(trans, root);
		}
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}

	/* em is NULL except when we broke out mid-iteration. */
	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}
/*
 * Change i_size: either grow the file by inserting hole extents and updating
 * the on-disk inode, or shrink it via btrfs_truncate().
 */
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	int ret;

	if (newsize == oldsize)
		return 0;

	if (newsize > oldsize) {
		/* Growing: drop stale pagecache, then fill with hole extents. */
		truncate_pagecache(inode, oldsize, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret)
			return ret;

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		/* Persist the new size only after the holes are in place. */
		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		ret = btrfs_update_inode(trans, root, inode);
		btrfs_end_transaction(trans, root);
	} else {
		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			BTRFS_I(inode)->ordered_data_close = 1;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);
		ret = btrfs_truncate(inode);
	}

	return ret;
}
/*
 * VFS ->setattr: validate the request, apply a size change through
 * btrfs_setsize(), then copy the remaining attributes and dirty the inode.
 */
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int ret;

	if (btrfs_root_readonly(BTRFS_I(inode)->root))
		return -EROFS;

	ret = inode_change_ok(inode, attr);
	if (ret)
		return ret;

	if ((attr->ia_valid & ATTR_SIZE) && S_ISREG(inode->i_mode)) {
		ret = btrfs_setsize(inode, attr->ia_size);
		if (ret)
			return ret;
	}

	if (!attr->ia_valid)
		return 0;

	setattr_copy(inode, attr);
	ret = btrfs_dirty_inode(inode);
	if (!ret && (attr->ia_valid & ATTR_MODE))
		ret = btrfs_acl_chmod(inode);
	return ret;
}
/*
 * VFS ->evict_inode: drop the pagecache and, for an unlinked inode, delete
 * all its items (and its orphan entry) inside a reserved transaction.
 */
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv, *global_rsv;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	unsigned long nr;
	int ret;

	trace_btrfs_inode_evict(inode);

	truncate_inode_pages(&inode->i_data, 0);

	/* Still-linked inodes (and free-space inodes) need no on-disk delete. */
	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
			       btrfs_is_free_space_inode(root, inode)))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	/* During log replay, orphans are handled by the replay code. */
	if (root->fs_info->log_root_recovering) {
		BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
		goto no_delete;
	}

	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	rsv->size = min_size;
	global_rsv = &root->fs_info->global_block_rsv;

	btrfs_i_size_write(inode, 0);

	/*
	 * This is a bit simpler than btrfs_truncate since
	 *
	 * 1) We've already reserved our space for our orphan item in the
	 *    unlink.
	 * 2) We're going to delete the inode item, so we don't need to update
	 *    it at all.
	 *
	 * So we just need to reserve some slack space in case we add bytes
	 * when doing the truncate.
	 */
	while (1) {
		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);

		/*
		 * Try and steal from the global reserve since we will
		 * likely not use this space anyway, we want to try as
		 * hard as possible to get this to work.
		 */
		if (ret)
			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);

		if (ret) {
			printk(KERN_WARNING "Could not get space for a "
			       "delete, will truncate on mount %d\n", ret);
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans->block_rsv = rsv;

		/* -EAGAIN asks us to retry with a fresh transaction. */
		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -EAGAIN)
			break;

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	btrfs_free_block_rsv(root, rsv);

	if (ret == 0) {
		trans->block_rsv = root->orphan_block_rsv;
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	/* Return the inode number to the free-ino cache where applicable. */
	if (!(root == root->fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
		btrfs_return_ino(root, btrfs_ino(inode));

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
no_delete:
	clear_inode(inode);
	return;
}
/*
* this returns the key found in the dir entry in the location pointer.
* If no dir entries were found, location->objectid is 0.
*/
/*
 * Look up the dir item for 'dentry' in 'dir' and copy its key into
 * *location.  location->objectid is zeroed when no entry exists.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
				   dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR_OR_NULL(di)) {
		/* NULL means "not found" and is not an error. */
		if (IS_ERR(di))
			ret = PTR_ERR(di);
		location->objectid = 0;
	} else {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	}

	btrfs_free_path(path);
	return ret;
}
/*
* when we hit a tree root in a directory, the btrfs part of the inode
* needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
static int fixup_tree_root_location(struct btrfs_root *root,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	/* Any mismatch below falls through to -ENOENT. */
	err = -ENOENT;
	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
				  BTRFS_I(dir)->root->root_key.objectid,
				  location->objectid);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	/* Verify the root ref really points back at this dir entry. */
	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	/* Zero root_refs means the subvolume is going away: treat as missing. */
	if (btrfs_root_refs(&new_root->root_item) == 0) {
		err = -ENOENT;
		goto out;
	}

	/* Rewrite 'location' to the root directory of the subvolume. */
	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}
/*
 * Insert the inode into the root's rb-tree of in-memory inodes, keyed by
 * inode number.  A dying duplicate found in the tree is unlinked first and
 * the insertion retried.
 */
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	u64 ino = btrfs_ino(inode);
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;

	if (inode_unhashed(inode))
		return;

	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_right;
		else {
			/*
			 * Same ino already present: it must be on its way
			 * out.  Remove it and restart the descent.
			 */
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}
/*
 * Remove the inode from the root's in-memory inode rb-tree; if that leaves
 * an unreferenced root empty, queue the root for cleanup.
 */
static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	/*
	 * Free space cache has inodes in the tree root, but the tree root has
	 * a root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
		/* Wait for lookups in flight, then re-check emptiness. */
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
/*
 * Drop every in-memory inode of a root that is going away (root_refs == 0).
 * Walks the inode rb-tree in ascending ino order, dropping the lock around
 * each iput and restarting the walk afterwards.
 */
void btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	/* Descend to the entry with the smallest ino >= objectid. */
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(&entry->vfs_inode))
			node = node->rb_left;
		else if (objectid > btrfs_ino(&entry->vfs_inode))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		/* No exact hit: resume from the first larger entry. */
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		/* Remember where to resume if we must re-traverse. */
		objectid = btrfs_ino(&entry->vfs_inode) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}
/* iget5_locked init callback: stamp a fresh inode with its ino and root. */
static int btrfs_init_locked_inode(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	BTRFS_I(inode)->root = args->root;
	inode->i_ino = args->ino;
	btrfs_set_inode_space_info(args->root, inode);
	return 0;
}
/* iget5_locked match callback: same ino AND same root means a cache hit. */
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	if (args->ino != btrfs_ino(inode))
		return 0;
	return args->root == BTRFS_I(inode)->root;
}
/*
 * Fetch (or allocate) the in-core inode for (objectid, root) via the inode
 * cache, keyed on both fields through btrfs_find_actor().
 */
static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct btrfs_iget_args args = {
		.ino = objectid,
		.root = root,
	};

	return iget5_locked(s, objectid, btrfs_find_actor,
			    btrfs_init_locked_inode, &args);
}
/* Get an inode object given its location and corresponding root.
* Returns in *is_new if the inode was read from disk
*/
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode = btrfs_iget_locked(s, location->objectid, root);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* Cache hit: nothing to read from disk, hand it straight back. */
	if (!(inode->i_state & I_NEW))
		return inode;

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));

	btrfs_read_locked_inode(inode);
	if (is_bad_inode(inode)) {
		unlock_new_inode(inode);
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	inode_tree_add(inode);
	unlock_new_inode(inode);
	if (new)
		*new = 1;

	return inode;
}
/*
 * Build an in-memory-only, read-only directory inode standing in for a
 * subvolume whose root ref could not be resolved (see btrfs_lookup_dentry).
 */
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* Tie the dummy inode to the subvolume it stands in for. */
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->dummy_inode = 1;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_op = &btrfs_dir_ro_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}
/*
 * Resolve a name in 'dir' to an inode, crossing into a subvolume root when
 * the dir entry holds a ROOT_ITEM key instead of an INODE_ITEM key.
 */
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	int index;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	/* A key may already be stashed in d_fsdata; consume and free it. */
	if (unlikely(d_need_lookup(dentry))) {
		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
		kfree(dentry->d_fsdata);
		dentry->d_fsdata = NULL;
		/* This thing is hashed, drop it for now */
		d_drop(dentry);
	} else {
		ret = btrfs_inode_by_name(dir, dentry, &location);
	}

	if (ret < 0)
		return ERR_PTR(ret);

	/* objectid == 0 means the name does not exist (negative lookup). */
	if (location.objectid == 0)
		return NULL;

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

	/* SRCU keeps the subvolume root alive across the crossing. */
	index = srcu_read_lock(&root->fs_info->subvol_srcu);
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
	}
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&root->fs_info->cleanup_work_sem);
		/*
		 * NOTE(review): on cleanup failure 'inode' is replaced by an
		 * ERR_PTR without an iput — verify no reference is leaked.
		 */
		if (ret)
			inode = ERR_PTR(ret);
	}

	return inode;
}
/*
 * ->d_delete: return 1 to drop dentries belonging to a dead subvolume or to
 * the dummy empty-subvolume directory, so they are not kept in the dcache.
 */
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root;

	/* Negative dentry: judge by the parent's inode instead. */
	if (!inode && !IS_ROOT(dentry))
		inode = dentry->d_parent->d_inode;
	if (!inode)
		return 0;

	root = BTRFS_I(inode)->root;
	return btrfs_root_refs(&root->root_item) == 0 ||
	       btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
}
/*
 * ->d_release: free the per-dentry data (presumably the stashed btrfs_key
 * consumed in btrfs_lookup_dentry).  kfree(NULL) is a no-op, so the
 * previous NULL guard was redundant.
 */
static void btrfs_dentry_release(struct dentry *dentry)
{
	kfree(dentry->d_fsdata);
}
/* VFS ->lookup: resolve the name and splice the result into the dcache. */
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
	struct dentry *alias = d_splice_alias(inode, dentry);

	/* Lookup is done; clear the flag under d_lock. */
	if (unlikely(d_need_lookup(dentry))) {
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
		spin_unlock(&dentry->d_lock);
	}
	return alias;
}
/*
 * Maps the on-disk dir item file type (as returned by btrfs_dir_type) to
 * the DT_* values that filldir expects; indexed in btrfs_real_readdir().
 */
unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
/*
 * VFS ->readdir: emit "." and ".." specially, then walk DIR_INDEX (or
 * DIR_ITEM for the tree root) items, merging in not-yet-committed delayed
 * insertions and skipping delayed deletions.
 */
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];	/* stack buffer; long names fall back to kmalloc */
	char *name_ptr;
	int name_len;
	int is_curr = 0;	/* filp->f_pos points to the current index? */

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       filp->f_pos, btrfs_ino(inode), DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
		u64 pino = parent_ino(filp->f_path.dentry);
		over = filldir(dirent, "..", 2,
			       filp->f_pos, pino, DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 2;
	}
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;

	/* Collect entries still sitting in the delayed-items lists. */
	if (key_type == BTRFS_DIR_INDEX_KEY) {
		INIT_LIST_HEAD(&ins_list);
		INIT_LIST_HEAD(&del_list);
		btrfs_get_delayed_items(inode, &ins_list, &del_list);
	}

	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
	key.objectid = btrfs_ino(inode);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < filp->f_pos)
			goto next;
		/* Skip entries that a pending delayed deletion will remove. */
		if (key_type == BTRFS_DIR_INDEX_KEY &&
		    btrfs_should_delete_dir_index(&del_list,
						  found_key.offset))
			goto next;

		filp->f_pos = found_key.offset;
		is_curr = 1;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);

		/* One item may pack several dir entries; walk them all. */
		while (di_cur < di_total) {
			struct btrfs_key location;

			if (verify_dir_item(root, leaf, di))
				break;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len <= sizeof(tmp_name)) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);

			/* is this a reference to our own snapshot? If so
			 * skip it.
			 *
			 * In contrast to old kernels, we insert the snapshot's
			 * dir item and dir index after it has been created, so
			 * we won't find a reference to our own snapshot. We
			 * still keep the following code for backward
			 * compatibility.
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset, location.objectid,
				       d_type);

skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			/* Non-zero 'over' means the user buffer is full. */
			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
next:
		path->slots[0]++;
	}

	if (key_type == BTRFS_DIR_INDEX_KEY) {
		if (is_curr)
			filp->f_pos++;
		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
						      &ins_list);
		if (ret)
			goto nopos;
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	if (key_type == BTRFS_DIR_INDEX_KEY)
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	if (key_type == BTRFS_DIR_INDEX_KEY)
		btrfs_put_delayed_items(&ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}
/*
 * VFS ->write_inode: for WB_SYNC_ALL writeback, push the inode out by
 * joining and committing (or, during unmount for free-space inodes,
 * nolock-ending) a transaction.
 */
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	bool nolock;

	if (BTRFS_I(inode)->dummy_inode)
		return 0;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	nolock = btrfs_fs_closing(root->fs_info) &&
		 btrfs_is_free_space_inode(root, inode);

	trans = nolock ? btrfs_join_transaction_nolock(root)
		       : btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	return nolock ? btrfs_end_transaction_nolock(trans, root)
		      : btrfs_commit_transaction(trans, root);
}
/*
* This is somewhat expensive, updating the tree every time the
* inode changes. But, it is most likely to find the inode in cache.
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
int btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	/* Dummy inodes (see new_simple_dir) have no on-disk item. */
	if (BTRFS_I(inode)->dummy_inode)
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, inode);
	/* (the former "ret && ret == -ENOSPC" was redundant) */
	if (ret == -ENOSPC) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans, root);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(root);

	return ret;
}
/*
* This is a copy of file_update_time. We need this so we can return error on
* ENOSPC for updating the inode in the case of file write and mmap writes.
*/
int btrfs_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	int ret;
	/* Bitmask of which timestamps/version need syncing. */
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (mnt_want_write_file(file))
		return 0;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	/* Unlike file_update_time, propagate ENOSPC etc. to the caller. */
	ret = btrfs_dirty_inode(inode);
	if (!ret)
		mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
	return ret;
}
/*
* find the highest existing sequence number in a directory
* and then set the in-memory index_cnt variable to reflect
* free sequence numbers
*/
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	/* Search for the highest possible DIR_INDEX key of this directory. */
	key.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	/* No DIR_INDEX item found at all: the directory holds no entries. */
	if (found_key.objectid != btrfs_ino(inode) ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	/* Next free index is one past the highest existing one. */
	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}
/*
* helper to find a free sequence number in a given directory. This current
* code is very simple, later versions will do smarter things in the btree
*/
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	/* (u64)-1 means the counter was never initialized for this dir. */
	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			/* No delayed node had it; recover it from the btree. */
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	/* Hand out the current value and advance the counter. */
	*index = BTRFS_I(dir)->index_cnt++;

	return ret;
}
/*
 * Create a new in-core inode plus its on-disk INODE_ITEM and INODE_REF in a
 * single btree insertion.  On success the inode is hashed and linked into
 * the root's inode tree; on failure an ERR_PTR is returned and the dir
 * index reserved for it (if any) is given back.
 *
 * (The former local 'owner' was computed but never used — removed.)
 */
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     umode_t mode, u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		/* Reserve the dir index for the entry that will point at us. */
		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;
	btrfs_set_inode_space_info(root, inode);

	/* Insert INODE_ITEM and INODE_REF back to back in one go. */
	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	return inode;
fail:
	/* Give back the dir index we reserved above. */
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}
/*
 * Map an inode's mode to the btrfs directory entry type via the
 * btrfs_type_by_mode lookup table.
 */
static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 *
 * Returns 0 on success or a negative errno.  On -EEXIST from the dir
 * item insert, the previously inserted ref is rolled back.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	/*
	 * Linking a subvolume root: the dir item must point at the
	 * subvolume's root key instead of an inode item key.
	 */
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	/* subvolumes get a root ref; regular inodes get an inode ref */
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, root, name, name_len,
				    parent_inode, &key,
				    btrfs_inode_type(inode), index);
	if (ret == -EEXIST)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	/* dir size counts the name twice: once per dir item, once per index */
	btrfs_i_size_write(parent_inode, parent_inode->i_size +
			   name_len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	return ret;

fail_dir_item:
	/*
	 * Roll back the ref inserted above.  The deletion result (err) is
	 * deliberately not propagated; -EEXIST from the dir item insert is
	 * returned to the caller regardless.
	 */
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;
		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 key.objectid, root->root_key.objectid,
				 parent_ino, &local_index, name, name_len);

	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
	}
	return ret;
}
/*
 * Link a freshly created non-directory inode into @dir under
 * @dentry's name.  Thin wrapper around btrfs_add_link() that
 * normalizes a positive "entry already exists" result to -EEXIST.
 */
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
{
	int ret;

	ret = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, backref, index);
	return (ret > 0) ? -EEXIST : ret;
}
/*
 * Directory inode operation: create a special file (device node, fifo
 * or socket) named by @dentry in @dir.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
		       umode_t mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;	/* undo link count + ref on failure */
	u64 objectid;
	unsigned long nr = 0;
	u64 index = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_op = &btrfs_special_inode_operations;
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		init_special_inode(inode, inode->i_mode, rdev);
		btrfs_update_inode(trans, root, inode);
		d_instantiate(dentry, inode);
	}
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}
/*
 * Directory inode operation: create a regular file named by @dentry
 * in @dir.  Same shape as btrfs_mknod() but wires up the regular file
 * inode/file operations and address space ops.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_create(struct inode *dir, struct dentry *dentry,
			umode_t mode, struct nameidata *nd)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int drop_inode = 0;	/* undo link count + ref on failure */
	int err;
	unsigned long nr = 0;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		d_instantiate(dentry, inode);
	}
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
/*
 * Directory inode operation: create a hard link to @old_dentry's
 * inode named by @dentry in @dir.
 *
 * Cross-subvolume links are rejected with -EXDEV; a saturated link
 * count with -EMLINK.
 *
 * Fix vs. the previous version: a btrfs_update_inode() failure used
 * to "goto fail" directly, skipping btrfs_end_transaction() (leaking
 * the transaction handle) and leaving drop_inode unset (leaking the
 * ihold() reference and the incremented link count).  Any error after
 * btrfs_inc_nlink()/ihold() now sets drop_inode and still ends the
 * transaction before the cleanup at fail:.
 */
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = old_dentry->d_inode;
	u64 index;
	unsigned long nr = 0;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
		return -EXDEV;

	if (inode->i_nlink == ~0U)
		return -EMLINK;

	err = btrfs_set_inode_index(dir, &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto fail;
	}

	btrfs_inc_nlink(inode);
	inode->i_ctime = CURRENT_TIME;
	ihold(inode);

	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
	if (!err) {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, root, inode);
		if (!err) {
			d_instantiate(dentry, inode);
			btrfs_log_new_name(trans, inode, NULL, parent);
		}
	}
	if (err)
		drop_inode = 1;	/* undo btrfs_inc_nlink() and ihold() */

	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
/*
 * Directory inode operation: create a directory named by @dentry in
 * @dir.  The new directory starts with i_size 0 and is linked into
 * its parent with btrfs_add_link().
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;	/* iput the half-built inode on failure */
	u64 objectid = 0;
	u64 index = 0;
	unsigned long nr = 1;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	drop_on_err = 1;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail;

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;

	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;	/* the dentry now owns the reference */

out_fail:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
/*
 * helper for btrfs_get_extent: trim the new map @em so it starts at
 * @map_start and spans @map_len, then insert it into @em_tree.
 * @existing is the overlapping map already present in the tree; the
 * caller is responsible for dropping it.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 delta;

	/* @map_start must fall inside the map we are trimming */
	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	delta = map_start - em->start;
	em->start = map_start;
	em->len = map_len;

	/* real, uncompressed extents shift their on-disk start as well */
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += delta;
		em->block_len -= delta;
	}
	return add_extent_mapping(em_tree, em);
}
/*
 * Decompress an inline file extent directly into @page.
 *
 * @path points at the leaf/slot holding the file extent item, @item is
 * that item, and @extent_offset is the byte offset within the
 * uncompressed inline data to start from.
 *
 * NOTE(review): a btrfs_decompress() error is deliberately swallowed;
 * the undecompressed tail of the page is zero-filled and 0 is still
 * returned.  Only -ENOMEM from the kmalloc below is reported.
 */
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	/* copy the compressed bytes out of the leaf before decompressing */
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		/* zero what we could not decompress so the page is sane */
		char *kaddr = kmap_atomic(page);
		unsigned long copy_size = min_t(u64,
				PAGE_CACHE_SIZE - pg_offset,
				max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr);
	}
	kfree(tmp);
	return 0;
}
/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation. This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 *
 * Returns an extent_map covering @start (possibly an explicit hole
 * map) or an ERR_PTR; the caller owns a reference on the result.
 */
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 len,
				    int create)
{
	int ret;
	int err = 0;
	u64 bytenr;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	int compress_type;

again:
	/* fast path: the mapping may already be cached in ram */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			/* inline data must be (re)copied into the page */
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
		path->reada = 1;
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		/* no exact match: step back to the previous item */
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compress_type = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		/* inline extents are rounded up to a sector boundary */
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		/* found extent ends before @start; look at the next item */
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		/* there is a hole between the two extents: map it */
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			/* disk_bytenr of zero marks a hole */
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
									 item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			/* no page to fill: just report the range */
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		em->orig_start = EXTENT_MAP_INLINE;
		if (compress_type) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			/* read path: copy (or decompress) inline data */
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret); /* -ENOMEM */
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			/*
			 * NOTE(review): this branch begins with BUG(), so
			 * everything after it in this block is unreachable
			 * dead code.
			 */
			BUG();
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;

				btrfs_release_path(path);
				trans = btrfs_join_transaction(root);

				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	} else {
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
		WARN_ON(1);
	}
not_found:
	em->start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped. It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			/* the existing map doesn't actually cover @start */
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			/* the racing insert covers us; use it instead */
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:

	trace_btrfs_get_extent(root, em);

	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	BUG_ON(!em); /* Error is always set */
	return em;
}
/*
 * Like btrfs_get_extent(), but for fiemap: when the mapped range turns
 * out to be a hole, also look for delalloc (dirty in ram, not yet
 * allocated) bytes in the range and report those as an
 * EXTENT_MAP_DELALLOC mapping instead of the plain hole.
 */
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to a hole, there might
		 * actually be delalloc bytes behind it
		 */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;	/* wrapped again */

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start,range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until  the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			/* pure delalloc mapping */
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}
/*
 * Allocate a new data extent for a direct IO write over
 * [@start, @start + @len) and return an extent_map describing it.
 *
 * @em is the map previously looked up for this range.  It is reused
 * when it is exactly the hole being filled; otherwise it is freed,
 * the cached range dropped, and a fresh map inserted.
 *
 * Returns the (possibly reused) map or an ERR_PTR.
 */
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  struct extent_map *em,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;
	bool insert = false;	/* true when a fresh map must go in the tree */

	/*
	 * Ok if the extent map we looked up is a hole and is for the exact
	 * range we want, there is no reason to allocate a new one, however if
	 * it is not right then we need to free this one and drop the cache for
	 * our range.
	 */
	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
	    em->len != len) {
		free_extent_map(em);
		em = NULL;
		insert = true;
		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	/* small writes near the start of the file are defrag candidates */
	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	if (!em) {
		em = alloc_extent_map();
		if (!em) {
			em = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;

	/*
	 * We need to do this because if we're using the original em we searched
	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
	 */
	em->flags = 0;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* keep dropping overlapping cached maps until ours goes in */
	while (insert) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	/* record the ordered extent so the write endio can finish it */
	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}
/*
 * returns 1 when the nocow is safe, < 0 on error, 0 if the
 * block must be cow'd
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		/* no exact match: look at the previous item */
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	/* e.g. blocks pinned by a snapshot/balance can't be overwritten */
	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * get_block_t for btrfs direct IO: map @iblock of @inode to a disk
 * block in @bh_result.  For writes this either proves the existing
 * extent can be overwritten in place (nocow/prealloc) or allocates a
 * new extent; for reads it simply reports the existing mapping.
 *
 * Returns 0 on success, -ENOTBLK to force fallback to buffered IO for
 * inline/compressed extents, or a negative errno.
 */
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io. INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
	 * to buffered IO. Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC. We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = em->len - (start - em->start);
		goto map;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		int ret;
		u64 block_start;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		/*
		 * we're not going to log anything, but we do need
		 * to make sure the current transaction stays open
		 * while we look for nocow cross refs
		 */
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			goto must_cow;

		if (can_nocow_odirect(trans, inode, start, len) == 1) {
			/* safe to write in place: just record the ordered
			 * extent and hand the mapping back */
			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			btrfs_end_transaction(trans, root);
			if (ret) {
				free_extent_map(em);
				return ret;
			}
			goto unlock;
		}
		btrfs_end_transaction(trans, root);
	}
must_cow:
	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	em = btrfs_new_extent_direct(inode, em, start, len);
	if (IS_ERR(em))
		return PTR_ERR(em);
	len = min(len, em->len - (start - em->start));
unlock:
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
			  0, NULL, GFP_NOFS);
map:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		set_buffer_new(bh_result);

	free_extent_map(em);

	return 0;
}
/*
 * Per-request state for a btrfs direct IO: shared by all the bios a
 * single DIO request is split into.
 */
struct btrfs_dio_private {
	struct inode *inode;
	u64 logical_offset;	/* file offset the dio starts at */
	u64 disk_bytenr;
	u64 bytes;		/* total length of the dio */
	u32 *csums;		/* expected csums, consumed one per bvec
				 * by the read endio */
	void *private;		/* original bio->bi_private, restored on
				 * completion */

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	struct bio *orig_bio;	/* the original, unsplit bio */
};
/*
 * Completion handler for a direct IO read bio: verify the data csum
 * of every bio_vec against the values stashed in dip->csums, unlock
 * the IO range and pass completion on to the generic DIO code.
 */
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start;
	u32 *private = dip->csums;	/* walks in step with bvec */

	start = dip->logical_offset;
	do {
		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			struct page *page = bvec->bv_page;
			char *kaddr;
			u32 csum = ~(u32)0;
			unsigned long flags;

			/* csum the page data and compare to the stored value */
			local_irq_save(flags);
			kaddr = kmap_atomic(page);
			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
					       csum, bvec->bv_len);
			btrfs_csum_final(csum, (char *)&csum);
			kunmap_atomic(kaddr);
			local_irq_restore(flags);

			flush_dcache_page(bvec->bv_page);
			if (csum != *private) {
				printk(KERN_ERR "btrfs csum failed ino %llu off"
				      " %llu csum %u private %u\n",
				      (unsigned long long)btrfs_ino(inode),
				      (unsigned long long)start,
				      csum, *private);
				err = -EIO;
			}
		}

		start += bvec->bv_len;
		private++;
		bvec++;
	} while (bvec <= bvec_end);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1);
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}
/*
 * Completion handler for a direct IO write bio.  Finishes every
 * ordered extent the bio covered: inserts or updates the file extent
 * items, queues the pending csums, updates i_size, and finally hands
 * completion back to the generic DIO code via dio_end_io().
 *
 * Fix vs. the previous version: when btrfs_join_transaction() failed,
 * the error path jumped to out: and passed the ERR_PTR trans straight
 * into btrfs_end_transaction(), dereferencing an invalid pointer.
 * The call is now guarded with IS_ERR().
 */
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
	if (!ret)
		goto out_test;

	BUG_ON(!ordered);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		err = -ENOMEM;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		/* nocow write: the file extent item already exists */
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			err = btrfs_update_inode_fallback(trans, root, inode);
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		/* prealloc write: flip the existing extent to "written" */
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		/* plain cow write: insert the freshly allocated extent */
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
		btrfs_update_inode_fallback(trans, root, inode);
	ret = 0;
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	/* don't end a transaction we never managed to join */
	if (!IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	ordered_offset = ordered->file_offset + ordered->len;
	/*
	 * Drop both references: the one handed to us by
	 * btrfs_dec_test_first_ordered_pending() and the base ref that
	 * kept the ordered extent alive until IO completion.
	 */
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);

out_test:
	/*
	 * our bio might span multiple ordered extents. If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		goto again;
	}
out_done:
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}
/*
 * Checksum hook invoked by the async submit machinery for direct-IO
 * writes: compute the data csums for @bio before it goes to disk.
 */
static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
				struct bio *bio, int mirror_num,
				unsigned long bio_flags, u64 offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	err = btrfs_csum_one_bio(root, inode, bio, offset, 1);
	BUG_ON(err); /* -ENOMEM */
	return 0;
}
/*
 * Completion handler for each split bio of a direct IO.  Records any
 * error in the shared dio_private and, once the last outstanding split
 * bio finishes, completes the original user-visible bio.
 */
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
		      "sector %#Lx len %u err no %d\n",
		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before atomic variable goto zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	/* we are the last split bio: finish the original request */
	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	/* drop the reference taken when this split bio was created */
	bio_put(bio);
}
/*
 * Allocate a bio for a split direct-IO submission, sized to the largest
 * number of vecs the target block device will accept.
 */
static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	return btrfs_bio_alloc(bdev, first_sector, bio_get_nr_vecs(bdev),
			       gfp_flags);
}
/*
 * Submit one direct-IO bio: register the end_io workqueue hook, handle
 * checksumming (async or inline for writes, csum lookup for reads) and
 * map the bio down to the device.
 *
 * Returns 0 on success or a negative errno.  The bio_get()/bio_put()
 * pair keeps the bio alive across the submission paths.
 *
 * Cleanup vs. the old code: the final branch used to read
 * "else if (!skip_sum)", but skip_sum has already been handled by the
 * "goto map" above, so the condition was always true; it is now a
 * plain else.
 */
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 int rw, u64 file_offset, int skip_sum,
					 u32 *csums, int async_submit)
{
	int write = rw & REQ_WRITE;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	bio_get(bio);
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	if (ret)
		goto err;

	/* nodatasum inodes need no csum work at all */
	if (skip_sum)
		goto map;

	if (write && async_submit) {
		/* csums are generated in __btrfs_submit_bio_start_direct_io */
		ret = btrfs_wq_submit_bio(root->fs_info,
				   inode, rw, bio, 0, 0,
				   file_offset,
				   __btrfs_submit_bio_start_direct_io,
				   __btrfs_submit_bio_done);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else {
		/* read: fetch the expected csums for later verification */
		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
					  file_offset, csums);
		if (ret)
			goto err;
	}

map:
	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
	bio_put(bio);
	return ret;
}
/*
 * Map the original direct-IO bio against the chunk layout.  If the whole
 * bio fits inside one contiguous mapping it is submitted unchanged;
 * otherwise it is split into per-mapping bios that all share the
 * dio_private and complete through btrfs_end_dio_bio().
 */
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;	/* bytes gathered into the current split bio */
	u64 map_length;		/* contiguous length at start_sector */
	int nr_pages = 0;
	u32 *csums = dip->csums;
	int ret = 0;
	int async_submit = 0;
	int write = rw & REQ_WRITE;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		bio_put(orig_bio);
		return -EIO;
	}

	/* fast path: the whole bio maps contiguously, submit it as-is */
	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	async_submit = 1;
	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		/* flush the current split when it would cross the mapping
		 * boundary or the bio cannot take the next page */
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     csums, async_submit);
			if (ret) {
				bio_put(bio);
				/* undo the inc above; end_io owns the rest */
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			/* Write's use the ordered csums */
			if (!write && !skip_sum)
				csums = csums + nr_pages;
			start_sector += submit_len >> 9;
			file_offset += submit_len;
			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			/* re-map from the new start sector */
			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages ++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     csums, async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before atomic variable goto zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}
/*
 * Entry point handed to __blockdev_direct_IO: wrap @bio in a
 * btrfs_dio_private, pick the read/write completion handler and submit
 * through btrfs_submit_direct_hook().  On failure the write reservation
 * and ordered extent are torn down here.
 */
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}

	dip->csums = NULL;

	/* Write's use the ordered csum stuff, so we don't need dip->csums */
	if (!write && !skip_sum) {
		/* one csum slot per bio_vec for read verification */
		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
		if (!dip->csums) {
			kfree(dip);
			ret = -ENOMEM;
			goto free_ordered;
		}
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	/* total byte count of the request, summed over all vecs */
	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		/*
		 * NOTE(review): ordered is dereferenced without a NULL
		 * check; presumably a write at this offset always has an
		 * ordered extent registered — confirm against the caller.
		 */
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		/* drop the lookup ref and the ordered tree's own ref */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}
/*
 * Validate a direct-IO request: the file offset and every segment's
 * address and length must be sector-aligned, and (for reads) no two
 * iovec segments may share a base address, since that would corrupt
 * csum verification.  Returns 0 when direct IO is allowed, -EINVAL
 * otherwise (callers then fall back to buffered IO).
 */
static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
			struct iov_iter *iter, loff_t offset)
{
	int seg;
	int i;
	size_t size;
	unsigned long addr;
	unsigned blocksize_mask = root->sectorsize - 1;
	ssize_t retval = -EINVAL;
	/* NOTE(review): end is accumulated below but never consulted;
	 * looks like leftover from an older range check */
	loff_t end = offset;

	if (offset & blocksize_mask)
		goto out;

	/* Check the memory alignment. Blocks cannot straddle pages */
	if (iov_iter_has_iovec(iter)) {
		const struct iovec *iov = iov_iter_iovec(iter);

		for (seg = 0; seg < iter->nr_segs; seg++) {
			addr = (unsigned long)iov[seg].iov_base;
			size = iov[seg].iov_len;
			end += size;
			if ((addr & blocksize_mask) || (size & blocksize_mask))
				goto out;

			/* If this is a write we don't need to check anymore */
			if (rw & WRITE)
				continue;

			/*
			 * Check to make sure we don't have duplicate iov_base's
			 * in this iovec, if so return EINVAL, otherwise we'll
			 * get csum errors when reading back.
			 */
			for (i = seg + 1; i < iter->nr_segs; i++) {
				if (iov[seg].iov_base == iov[i].iov_base)
					goto out;
			}
		}
	} else if (iov_iter_has_bvec(iter)) {
		struct bio_vec *bvec = iov_iter_bvec(iter);

		for (seg = 0; seg < iter->nr_segs; seg++) {
			addr = (unsigned long)bvec[seg].bv_offset;
			size = bvec[seg].bv_len;
			end += size;
			if ((addr & blocksize_mask) || (size & blocksize_mask))
				goto out;
		}
	} else
		BUG();

	retval = 0;
out:
	return retval;
}
/*
 * address_space direct_IO implementation.  Validates alignment, locks
 * the extent range (waiting out any ordered extents), reserves space for
 * writes, then hands the request to __blockdev_direct_IO with our
 * get_blocks and submit hooks.  Returning 0 early makes the VFS fall
 * back to buffered IO.
 *
 * Bug fix: the byte count was computed with iov_length(iov, nr_segs),
 * using variables that no longer exist in this function after the
 * iov_iter conversion; it now comes from iov_iter_count(iter).
 */
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	ssize_t ret;
	int writing = rw & WRITE;
	int write_bits = 0;
	size_t count = iov_iter_count(iter);

	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
		return 0;

	lockstart = offset;
	lockend = offset + count - 1;

	if (writing) {
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	}

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure theres no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		/* drop the lock while waiting so the ordered IO can finish */
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}

	/*
	 * we don't use btrfs_set_extent_delalloc because we don't want
	 * the dirty or uptodate bits
	 */
	if (writing) {
		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     EXTENT_DELALLOC, NULL, &cached_state,
				     GFP_NOFS);
		if (ret) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
					 lockend, EXTENT_LOCKED | write_bits,
					 1, 0, &cached_state, GFP_NOFS);
			goto out;
		}
	}

	free_extent_state(cached_state);
	cached_state = NULL;

	ret = __blockdev_direct_IO(rw, iocb, inode,
		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
		   iter, offset, btrfs_get_blocks_direct, NULL,
		   btrfs_submit_direct, 0);

	if (ret < 0 && ret != -EIOCBQUEUED) {
		/* hard error: release the whole locked range */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
			      offset + iov_iter_count(iter) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	} else if (ret >= 0 && ret < iov_iter_count(iter)) {
		/*
		 * We're falling back to buffered, unlock the section we didn't
		 * do IO on.
		 */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
			      offset + iov_iter_count(iter) - 1,
			      EXTENT_LOCKED | write_bits, 1, 0,
			      &cached_state, GFP_NOFS);
	}
out:
	free_extent_state(cached_state);
	return ret;
}
/* fiemap ioctl backend: delegate to the generic extent fiemap walker. */
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len,
			     btrfs_get_extent_fiemap);
}
/* address_space readpage: read one full page through the extent io tree. */
int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *io_tree;

	io_tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(io_tree, page, btrfs_get_extent, 0);
}
/*
 * address_space writepage.  Refuses to do filesystem work from memory
 * reclaim context (PF_MEMALLOC) — the page is simply redirtied so that
 * regular writeback handles it later.
 */
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *io_tree;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	io_tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(io_tree, page, btrfs_get_extent, wbc);
}
/* address_space writepages: batch writeback through the extent io tree. */
int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct extent_io_tree *io_tree;

	io_tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(io_tree, mapping, btrfs_get_extent, wbc);
}
/* address_space readpages: readahead a batch of pages. */
static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *io_tree;

	io_tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(io_tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
/*
 * Try to release a page's extent mappings; on success (returns 1) also
 * drop the page's private state and the reference that came with it.
 */
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct btrfs_inode *bi = BTRFS_I(page->mapping->host);
	int released;

	released = try_release_extent_mapping(&bi->extent_tree, &bi->io_tree,
					      page, gfp_flags);
	if (released == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return released;
}
/* address_space releasepage: pages under IO or still dirty stay put. */
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page))
		return 0;
	if (PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
/*
 * address_space invalidatepage.  The page will never see IO again, so
 * any ordered-extent accounting it was responsible for has to be
 * finished here, and its delalloc/dirty extent bits cleared.
 */
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	/* partial invalidate: just try to release the mappings */
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					   page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 &cached_state, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		/* the range was unlocked above; relock for final clearing */
		cached_state = NULL;
		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
	}
	clear_extent_bit(tree, page_start, page_end,
		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	/* detach any remaining private state from the page */
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}
/*
* btrfs_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
* be careful to check for EOF conditions here. We set the page up correctly
* for a written page which means we get ENOSPC checking when writing into
* holes and correct delalloc and unwritten extent mapping on filesystems that
* support these features.
*
* We are not allowed to take the i_mutex here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
* vmtruncate() writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
*/
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	int reserved = 0;	/* did the delalloc reservation succeed? */
	u64 page_start;
	u64 page_end;

	/* reserve space for the page up front; ENOSPC here means SIGBUS */
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (!ret) {
		ret = btrfs_update_time(vma->vm_file);
		reserved = 1;
	}
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents. Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
	 * clear any delalloc bits for the range we are fixing to save.  There
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	/* zero the tail of a partially-inside-EOF page */
	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret)
		return VM_FAULT_LOCKED;
	unlock_page(page);
out:
	/* any failure path gives back the reservation taken above */
	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out_noreserve:
	return ret;
}
/*
 * Truncate an inode to its current i_size.  Zeroes the partial tail
 * page, waits out ordered IO, then loops dropping extent items in
 * -EAGAIN-sized transaction chunks, with an orphan item protecting the
 * inode against a crash mid-truncate.
 */
static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret;
	int err = 0;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);

	/* zero the part of the last page that lies beyond the new size */
	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
	if (ret)
		return ret;

	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

	/*
	 * Yes ladies and gentelment, this is indeed ugly.  The fact is we have
	 * 3 things going on here
	 *
	 * 1) We need to reserve space for our orphan item and the space to
	 * delete our orphan item.  Lord knows we don't want to have a dangling
	 * orphan item because we didn't reserve space to remove it.
	 *
	 * 2) We need to reserve space to update our inode.
	 *
	 * 3) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to all be seperate.  The fact is we can use alot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be seperate so it
	 * doesn't end up using space reserved for updating the inode or
	 * removing the orphan item.  We also need to be able to stop the
	 * transaction and start a new one, which means we need to be able to
	 * update the inode several times, and we have no idea of knowing how
	 * many times that will be, so we can't just reserve 1 item for the
	 * entirety of the opration, so that has to be done seperately as well.
	 * Then there is the orphan item, which does indeed need to be held on
	 * to for the whole operation, and we need nobody to touch this reserved
	 * space except the orphan code.
	 *
	 * So that leaves us with
	 *
	 * 1) root->orphan_block_rsv - for the orphan deletion.
	 * 2) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;

	/*
	 * 1 for the truncate slack space
	 * 1 for the orphan item we're going to add
	 * 1 for the orphan item deletion
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 4);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);

	ret = btrfs_orphan_add(trans, inode);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto out;
	}

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	while (1) {
		/* top up the truncate reservation for the next chunk */
		ret = btrfs_block_rsv_refill(root, rsv, min_size);
		if (ret) {
			/*
			 * This can only happen with the original transaction we
			 * started above, every other time we shouldn't have a
			 * transaction started yet.
			 */
			if (ret == -EAGAIN)
				goto end_trans;
			err = ret;
			break;
		}

		if (!trans) {
			/* Just need the 1 for updating the inode */
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = err = PTR_ERR(trans);
				trans = NULL;
				break;
			}
		}

		trans->block_rsv = rsv;

		/* -EAGAIN means "more to drop, restart with a new trans" */
		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -EAGAIN) {
			err = ret;
			break;
		}

		trans->block_rsv = &root->fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}
end_trans:
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root, nr);
	}

	if (ret == 0 && inode->i_nlink > 0) {
		/* truncate finished: delete the protective orphan item */
		trans->block_rsv = root->orphan_block_rsv;
		ret = btrfs_orphan_del(trans, inode);
		if (ret)
			err = ret;
	} else if (ret && inode->i_nlink > 0) {
		/*
		 * Failed to do the truncate, remove us from the in memory
		 * orphan list.
		 */
		ret = btrfs_orphan_del(NULL, inode);
	}

	if (trans) {
		trans->block_rsv = &root->fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret && !err)
			err = ret;

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);
	}

out:
	btrfs_free_block_rsv(root, rsv);

	if (ret && !err)
		err = ret;

	return err;
}
/*
* create a new subvolume directory/inode (helper for the ioctl).
*/
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root, u64 new_dirid)
{
	u64 index = 0;
	struct inode *inode;
	int ret;

	/*
	 * The subvolume root directory is its own parent, hence both the
	 * objectid and the parent passed here are new_dirid.
	 */
	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
				new_dirid, new_dirid,
				S_IFDIR | (~current_umask() & S_IRWXUGO),
				&index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	set_nlink(inode, 1);
	btrfs_i_size_write(inode, 0);

	ret = btrfs_update_inode(trans, new_root, inode);
	iput(inode);
	return ret;
}
/*
 * super_operations alloc_inode: allocate a btrfs_inode from the slab and
 * initialize every runtime field to a clean state before handing the
 * embedded VFS inode back to the core.
 */
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	/* plain-value fields */
	ei->root = NULL;
	ei->space_info = NULL;
	ei->generation = 0;
	ei->sequence = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;	/* -1 = directory index not loaded yet */
	ei->last_unlink_trans = 0;

	/* accounting protected by ei->lock */
	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;

	/* bitfield-style state flags */
	ei->ordered_data_close = 0;
	ei->orphan_meta_reserved = 0;
	ei->dummy_inode = 0;
	ei->in_defrag = 0;
	ei->delalloc_meta_reserved = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	/* embedded trees, locks and list heads */
	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(&ei->io_tree, &inode->i_data);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
	ei->io_tree.track_uptodate = 1;
	ei->io_failure_tree.track_uptodate = 1;
	mutex_init(&ei->log_mutex);
	mutex_init(&ei->delalloc_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}
/* RCU callback that returns the btrfs_inode to its slab cache. */
static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode;

	inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
/*
 * super_operations destroy_inode: sanity-check that all accounting has
 * been drained, detach the inode from the ordered/orphan lists, drop any
 * leftover ordered extents and free it via RCU.
 */
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(BTRFS_I(inode)->outstanding_extents);
	WARN_ON(BTRFS_I(inode)->reserved_extents);
	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
	WARN_ON(BTRFS_I(inode)->csum_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
		       (unsigned long long)btrfs_ino(inode));
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->orphan_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			/* drop the lookup ref and the tree's ref removed
			 * by btrfs_remove_ordered_extent above */
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	btrfs_remove_delayed_node(inode);
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}
/*
 * super_operations drop_inode: evict inodes of dead (zero-ref) roots
 * immediately, except free-space cache inodes; otherwise defer to the
 * generic policy.
 */
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    !btrfs_is_free_space_inode(root, inode))
		return 1;
	return generic_drop_inode(inode);
}
/* Slab constructor: one-time init of the embedded VFS inode. */
static void init_once(void *foo)
{
	struct btrfs_inode *bi = foo;

	inode_init_once(&bi->vfs_inode);
}
/* Free all btrfs slab caches (in reverse order of creation). */
void btrfs_destroy_cachep(void)
{
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
}
/*
 * Create all btrfs slab caches.  On any failure the caches created so
 * far are torn down again and -ENOMEM is returned.
 */
int btrfs_init_cachep(void)
{
	/* inode cache gets a constructor for the embedded VFS inode */
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
/*
 * inode_operations getattr.  On top of the generic fields, report the
 * subvolume's anonymous device and include not-yet-flushed delalloc
 * bytes in the block count.
 */
static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	u32 bs = inode->i_sb->s_blocksize;

	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (ALIGN(inode_get_bytes(inode), bs) +
			ALIGN(BTRFS_I(inode)->delalloc_bytes, bs)) >> 9;
	return 0;
}
/*
* If a file is moved, it will inherit the cow and compression flags of the new
* directory.
*/
static void fixup_inode_flags(struct inode *dir, struct inode *inode)
{
	struct btrfs_inode *b_dir = BTRFS_I(dir);
	struct btrfs_inode *b_inode = BTRFS_I(inode);

	/* clear both inheritable bits, then copy them from the directory */
	b_inode->flags &= ~(BTRFS_INODE_NODATACOW | BTRFS_INODE_COMPRESS);
	b_inode->flags |= b_dir->flags & (BTRFS_INODE_NODATACOW |
					  BTRFS_INODE_COMPRESS);
}
/*
 * inode_operations rename.  Supports renaming regular inodes within one
 * root and subvolume links between roots; relies on log-transaction
 * pinning so a crash leaves the inode reachable under either the old or
 * the new name.
 */
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			   struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;
	u64 old_ino = btrfs_ino(old_inode);

	/* can't rename into the placeholder dir of an empty subvolume */
	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * we're using rename to replace one file with another.
	 * and the replacement file is large.  Start IO on it now so
	 * we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume their normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(new_dir), index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
		btrfs_add_ordered_operation(trans, root, old_inode);

	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	/* remove the old name: subvol link vs. regular unlink */
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					old_dentry->d_inode,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	/* if the target name already exists, unlink it first */
	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(btrfs_ino(new_inode) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		if (!ret && new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_fail;
		}
	}

	fixup_inode_flags(new_dir, old_inode);

	/* finally link the inode in under its new name */
	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = new_dentry->d_parent;
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		/* release the pin taken by btrfs_pin_log_trans above */
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction(trans, root);
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}
/*
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
/*
 * Flush every inode on this root's delalloc list out to disk and wait
 * for the async submission machinery to drain, so that ordered extents
 * exist for all of the dirty data before we return.
 *
 * @delay_iput: nonzero to defer the final iput() to a helper thread.
 * Returns 0 on success or -EROFS on a read-only filesystem.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
struct list_head *head = &root->fs_info->delalloc_inodes;
struct btrfs_inode *binode;
struct inode *inode;
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
spin_lock(&root->fs_info->delalloc_lock);
while (!list_empty(head)) {
binode = list_entry(head->next, struct btrfs_inode,
delalloc_inodes);
/* igrab() can fail while the inode is being torn down */
inode = igrab(&binode->vfs_inode);
if (!inode)
list_del_init(&binode->delalloc_inodes);
/* drop the spinlock: filemap_flush() may block */
spin_unlock(&root->fs_info->delalloc_lock);
if (inode) {
filemap_flush(inode->i_mapping);
if (delay_iput)
btrfs_add_delayed_iput(inode);
else
iput(inode);
}
cond_resched();
spin_lock(&root->fs_info->delalloc_lock);
}
spin_unlock(&root->fs_info->delalloc_lock);
/* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
atomic_inc(&root->fs_info->async_submit_draining);
while (atomic_read(&root->fs_info->nr_async_submits) ||
atomic_read(&root->fs_info->async_delalloc_pages)) {
wait_event(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->nr_async_submits) == 0 &&
atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&root->fs_info->async_submit_draining);
return 0;
}
/*
 * Inode operation to create a symlink.  The target string is stored
 * inline in the btree as a BTRFS_EXTENT_DATA_KEY item (including its
 * trailing NUL), so it must fit in one inline extent.
 *
 * Returns 0 on success or a negative errno (-ENAMETOOLONG when the
 * target is too long, transaction/allocation errors otherwise).
 */
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_key key;
struct inode *inode = NULL;
int err;
int drop_inode = 0;
u64 objectid;
u64 index = 0 ;
int name_len;
int datasize;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
struct extent_buffer *leaf;
unsigned long nr = 0;
/* +1: the inline extent stores the terminating NUL as well */
name_len = strlen(symname) + 1;
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
return -ENAMETOOLONG;
/*
* 2 items for inode item and ref
* 2 items for dir items
* 1 item for xattr if selinux is on
*/
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans))
return PTR_ERR(trans);
err = btrfs_find_free_ino(root, &objectid);
if (err)
goto out_unlock;
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
dentry->d_name.len, btrfs_ino(dir), objectid,
S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_unlock;
}
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err) {
drop_inode = 1;
goto out_unlock;
}
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)
goto out_unlock;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
drop_inode = 1;
goto out_unlock;
}
/* insert the inline extent item that will hold the target string */
key.objectid = btrfs_ino(inode);
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
datasize = btrfs_file_extent_calc_inline_size(name_len);
err = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
if (err) {
drop_inode = 1;
btrfs_free_path(path);
goto out_unlock;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, ei, trans->transid);
btrfs_set_file_extent_type(leaf, ei,
BTRFS_FILE_EXTENT_INLINE);
btrfs_set_file_extent_encryption(leaf, ei, 0);
btrfs_set_file_extent_compression(leaf, ei, 0);
btrfs_set_file_extent_other_encoding(leaf, ei, 0);
btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
/* now that the data is in place, switch to the symlink ops */
inode->i_op = &btrfs_symlink_inode_operations;
inode->i_mapping->a_ops = &btrfs_symlink_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
inode_set_bytes(inode, name_len);
/* i_size excludes the stored NUL */
btrfs_i_size_write(inode, name_len - 1);
err = btrfs_update_inode(trans, root, inode);
if (err)
drop_inode = 1;
out_unlock:
if (!err)
d_instantiate(dentry, inode);
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
/* on failure undo the link count taken by btrfs_add_nondir() */
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root, nr);
return err;
}
/*
 * Preallocate [start, start + num_bytes) for @inode as PREALLOC file
 * extents, updating i_size as we go unless FALLOC_FL_KEEP_SIZE is set.
 *
 * If @trans is NULL a fresh transaction is started and ended around
 * every extent; otherwise the caller's transaction is used throughout
 * and never ended here.  Returns 0 on success or a negative errno.
 */
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
u64 i_size;
int ret = 0;
bool own_trans = true;
if (trans)
own_trans = false;
while (num_bytes > 0) {
if (own_trans) {
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
break;
}
}
ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
0, *alloc_hint, &ins, 1);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
ret = insert_reserved_file_extent(trans, inode,
cur_offset, ins.objectid,
ins.offset, ins.offset,
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
/* drop any cached extent mapping overlapping the new extent */
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
num_bytes -= ins.offset;
cur_offset += ins.offset;
*alloc_hint = ins.objectid + ins.offset;
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
(cur_offset > inode->i_size)) {
/* grow i_size, but never past the requested length */
if (cur_offset > actual_len)
i_size = actual_len;
else
i_size = cur_offset;
i_size_write(inode, i_size);
btrfs_ordered_update_i_size(inode, i_size, NULL);
}
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
}
if (own_trans)
btrfs_end_transaction(trans, root);
}
return ret;
}
/* Preallocate a range, starting a transaction per extent internally. */
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint,
NULL);
}
/* Preallocate a range inside the caller's existing transaction. */
int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint)
{
return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
min_size, actual_len, alloc_hint, trans);
}
/* ->set_page_dirty: btrfs pages carry no buffer heads. */
static int btrfs_set_page_dirty(struct page *page)
{
return __set_page_dirty_nobuffers(page);
}
/*
 * VFS ->permission hook for btrfs inodes.
 *
 * Write access to regular files, directories and symlinks is refused
 * when the subvolume is read-only (-EROFS) or the inode carries the
 * BTRFS_INODE_READONLY flag (-EACCES); all remaining checks are
 * delegated to generic_permission().
 */
static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;
	int wants_write = (mask & MAY_WRITE) &&
		(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));

	if (wants_write) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}
/* Directory inodes: full create/link/rename/xattr/ACL support. */
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
.create = btrfs_create,
.unlink = btrfs_unlink,
.link = btrfs_link,
.mkdir = btrfs_mkdir,
.rmdir = btrfs_rmdir,
.rename = btrfs_rename,
.symlink = btrfs_symlink,
.setattr = btrfs_setattr,
.mknod = btrfs_mknod,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
};
/* Read-only directories: lookup/permission/ACL only. */
static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
};
/* Directory file ops; same ioctl handler for native and compat. */
static const struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_ioctl,
#endif
.release = btrfs_release_file,
.fsync = btrfs_sync_file,
};
/* Callbacks wired into the extent_io state machine for data pages. */
static struct extent_io_ops btrfs_extent_io_ops = {
.fill_delalloc = run_delalloc_range,
.submit_bio_hook = btrfs_submit_bio_hook,
.merge_bio_hook = btrfs_merge_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
.writepage_end_io_hook = btrfs_writepage_end_io_hook,
.writepage_start_hook = btrfs_writepage_start_hook,
.set_bit_hook = btrfs_set_bit_hook,
.clear_bit_hook = btrfs_clear_bit_hook,
.merge_extent_hook = btrfs_merge_extent_hook,
.split_extent_hook = btrfs_split_extent_hook,
};
/*
* btrfs doesn't support the bmap operation because swapfiles
* use bmap to make a mapping of extents in the file. They assume
* these extents won't change over the life of the file and they
* use the bmap result to do IO directly to the drive.
*
* the btrfs bmap call would return logical addresses that aren't
* suitable for IO and they also will change frequently as COW
* operations happen. So, swapfile + btrfs == corruption.
*
* For now we're avoiding this by dropping bmap.
*/
/* Regular-file address space ops; note .bmap is deliberately absent
* (see the comment above about swapfiles and COW). */
static const struct address_space_operations btrfs_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
.set_page_dirty = btrfs_set_page_dirty,
.error_remove_page = generic_error_remove_page,
};
/* Symlink address space ops: no readahead, direct IO or dirtying. */
static const struct address_space_operations btrfs_symlink_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
};
/* Regular files: attr/xattr/fiemap/ACL support. */
static const struct inode_operations btrfs_file_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.fiemap = btrfs_fiemap,
.get_acl = btrfs_get_acl,
};
/* Device nodes, FIFOs and sockets. */
static const struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.get_acl = btrfs_get_acl,
};
/* Symlinks: target is read through the page cache (page_*_link). */
static const struct inode_operations btrfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
.get_acl = btrfs_get_acl,
};
/* Dentry callbacks for btrfs-specific delete/release handling. */
const struct dentry_operations btrfs_dentry_operations = {
.d_delete = btrfs_dentry_delete,
.d_release = btrfs_dentry_release,
};
| gpl-2.0 |
voidz777/android_kernel_htc_shooterk | arch/arm/kernel/process.c | 12 | 14780 | /*
* linux/arch/arm/kernel/process.c
*
* Copyright (C) 1996-2000 Russell King - Converted to ARM.
* Original Copyright (C) 1995 Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <stdarg.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/console.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/mach/time.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/* Human-readable names for the CPSR processor mode field, indexed by
* processor_mode() (26-bit legacy modes first, then 32-bit modes). */
static const char *processor_modes[] = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
"USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
"UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};
/* Instruction-set state names, indexed by isa_mode(). */
static const char *isa_modes[] = {
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
/* Nonzero disables the low-power path in cpu_idle(). */
static volatile int hlt_counter;
#include <mach/system.h>
/* Temporarily veto low-power idle (paired with enable_hlt()). */
void disable_hlt(void)
{
hlt_counter++;
}
void enable_hlt(void)
{
hlt_counter--;
}
/* "nohlt" boot parameter: start with low-power idle disabled. */
static int __init nohlt_setup(char *__unused)
{
hlt_counter = 1;
return 1;
}
/* "hlt" boot parameter: (re-)enable low-power idle. */
static int __init hlt_setup(char *__unused)
{
hlt_counter = 0;
return 1;
}
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
/*
 * Push buffered console output out before restart.  If another context
 * holds the console lock, wait briefly, then call console_unlock()
 * anyway to bust the lock so the final messages make it out.
 */
void arm_machine_flush_console(void)
{
printk("\n");
pr_emerg("Restarting %s\n", linux_banner);
if (console_trylock()) {
console_unlock();
return;
}
/* give the current holder a moment to release the lock */
mdelay(50);
local_irq_disable();
if (!console_trylock())
pr_emerg("arm_restart: Console was locked! Busting\n");
else
pr_emerg("arm_restart: Console was locked!\n");
/* unlock (or bust) so pending output is flushed */
console_unlock();
}
#else
/* No-op when console flushing on restart is not configured. */
void arm_machine_flush_console(void)
{
}
#endif
/*
 * Generic ARM restart sequence: flush the console, mask interrupts,
 * set up 1:1 reboot mappings, turn caches off and jump to the
 * machine-specific arch_reset().  Halts with a message if the reset
 * never takes effect.
 */
void arm_machine_restart(char mode, const char *cmd)
{
/* Flush the console to make sure all the relevant messages make it
* out to the console drivers */
arm_machine_flush_console();
/* Disable interrupts first */
local_irq_disable();
local_fiq_disable();
/*
* Tell the mm system that we are going to reboot -
* we may need it to insert some 1:1 mappings so that
* soft boot works.
*/
setup_mm_for_reboot();
/* Clean and invalidate caches */
flush_cache_all();
/* Turn off caching */
cpu_proc_fin();
/* Push out any further dirty data, and ensure cache is empty */
flush_cache_all();
/*Push out the dirty data from external caches */
outer_disable();
/*
* Now call the architecture specific reboot code.
*/
arch_reset(mode, cmd);
/*
* Whoops - the architecture was unable to reboot.
* Tell the user!
*/
mdelay(1000);
printk("Reboot failed -- System halted\n");
while (1);
}
/*
* Function pointers to optional machine specific functions
*/
/* Board/PM code installs its power-off handler here (may stay NULL). */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
/* Restart hook; defaults to the generic sequence above. */
void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
/* Empty IPI payload used by cpu_idle_wait() purely to wake CPUs. */
static void do_nothing(void *unused)
{
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard old value of
* pm_idle and update to new pm_idle value. Required while changing pm_idle
* handler on SMP systems.
*
* Caller must have changed pm_idle to the new value before the call. Old
* pm_idle value will not be used by any CPU after the return of this function.
*/
void cpu_idle_wait(void)
{
/* make the caller's pm_idle update visible before waking CPUs */
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
* This is our default idle handler. We need to disable
* interrupts here to ensure we don't miss a wakeup call.
*/
static void default_idle(void)
{
/* called with IRQs off (see cpu_idle()); arch_idle() sleeps the CPU */
if (!need_resched())
arch_idle();
/* always leave with interrupts enabled again */
local_irq_enable();
}
/* Current idle handler; PM/cpuidle code may repoint this pointer. */
void (*pm_idle)(void) = default_idle;
EXPORT_SYMBOL(pm_idle);
/*
* The idle thread, has rather strange semantics for calling pm_idle,
* but this is what x86 does and we need to do the same, so that
* things like cpuidle get called in the same way. The only difference
* is that we always respect 'hlt_counter' to prevent low power idle.
*/
/* Per-CPU idle loop: sleep via pm_idle() until work arrives, then
* schedule; never returns (offline CPUs exit through cpu_die()). */
void cpu_idle(void)
{
local_fiq_enable();
/* endless idle loop with no priority at all */
while (1) {
tick_nohz_stop_sched_tick(1);
idle_notifier_call_chain(IDLE_START);
while (!need_resched()) {
/* IRQs off so we cannot miss a wakeup between the
* need_resched() check and entering idle */
local_irq_disable();
#ifdef CONFIG_PL310_ERRATA_769419
wmb();
#endif
if (hlt_counter) {
/* low-power idle vetoed: busy-wait with IRQs on */
local_irq_enable();
cpu_relax();
} else {
stop_critical_timings();
pm_idle();
start_critical_timings();
/*
* This will eventually be removed - pm_idle
* functions should always return with IRQs
* enabled.
*/
WARN_ON(irqs_disabled());
local_irq_enable();
}
}
tick_nohz_restart_sched_tick();
idle_notifier_call_chain(IDLE_END);
preempt_enable_no_resched();
schedule();
preempt_disable();
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
cpu_die();
#endif
}
}
/* Mode character from the "reboot=" command line ('h' by default),
* passed through to arm_pm_restart(). */
static char reboot_mode = 'h';
int __init reboot_setup(char *str)
{
reboot_mode = str[0];
return 1;
}
__setup("reboot=", reboot_setup);
/* Common shutdown path: stop the other CPUs with interrupts off. */
void machine_shutdown(void)
{
preempt_disable();
#ifdef CONFIG_SMP
local_irq_disable();
smp_send_stop();
#endif
}
/* Halt: shut down and spin forever. */
void machine_halt(void)
{
machine_shutdown();
while (1);
}
/* Power off via the board-specific pm_power_off hook, if installed. */
void machine_power_off(void)
{
machine_shutdown();
if (pm_power_off)
pm_power_off();
}
/* Reboot through the registered arm_pm_restart handler. */
void machine_restart(char *cmd)
{
machine_shutdown();
arm_pm_restart(reboot_mode, cmd);
}
/*
* dump a block of kernel memory from around the given address
*/
/* Hex-dump roughly @nbytes of kernel memory around @addr (32 bytes per
* output line), labelled @name.  Silently skips non-kernel addresses. */
static void show_data(unsigned long addr, int nbytes, const char *name)
{
int i, j;
int nlines;
u32 *p;
/*
* don't attempt to dump non-kernel addresses or
* values that are probably just small negative numbers
*/
if (addr < PAGE_OFFSET || addr > -256UL)
return;
printk("\n%s: %#lx:\n", name, addr);
/*
* round address down to a 32 bit boundary
* and always dump a multiple of 32 bytes
*/
p = (u32 *)(addr & ~(sizeof(u32) - 1));
nbytes += (addr & (sizeof(u32) - 1));
nlines = (nbytes + 31) / 32;
for (i = 0; i < nlines; i++) {
/*
* just display low 16 bits of address to keep
* each line of the dump < 80 characters
*/
printk("%04lx ", (unsigned long)p & 0xffff);
for (j = 0; j < 8; j++) {
u32 data;
/* probe_kernel_address() fails safely on unmapped memory */
if (probe_kernel_address(p, data)) {
printk(" ********");
} else {
printk(" %08x", data);
}
++p;
}
printk("\n");
}
}
/* Dump the memory surrounding each register's value (nbytes before,
* nbytes after) to aid post-mortem crash analysis. */
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
mm_segment_t fs;
/* widen the address limit so show_data() may probe kernel memory */
fs = get_fs();
set_fs(KERNEL_DS);
show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
/* restore the original address limit */
set_fs(fs);
}
/* Print the complete register file, PSR flags, control-register state
* and surrounding memory for the given trap frame. */
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
char buf[64];
printk("CPU: %d %s (%s %.*s)\n",
raw_smp_processor_id(), print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
print_symbol("PC is at %s\n", instruction_pointer(regs));
print_symbol("LR is at %s\n", regs->ARM_lr);
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
regs->ARM_r10, regs->ARM_r9,
regs->ARM_r8);
printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
regs->ARM_r7, regs->ARM_r6,
regs->ARM_r5, regs->ARM_r4);
printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
regs->ARM_r3, regs->ARM_r2,
regs->ARM_r1, regs->ARM_r0);
/* render the NZCV condition flags as upper/lower-case letters */
flags = regs->ARM_cpsr;
buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
buf[4] = '\0';
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
buf, interrupts_enabled(regs) ? "n" : "ff",
fast_interrupts_enabled(regs) ? "n" : "ff",
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)],
get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
{
unsigned int ctrl;
buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
{
/* read translation table base and domain access control */
unsigned int transbase, dac;
asm("mrc p15, 0, %0, c2, c0\n\t"
"mrc p15, 0, %1, c3, c0\n"
: "=r" (transbase), "=r" (dac));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
transbase, dac);
}
#endif
/* CP15 c1 system control register */
asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
printk("Control: %08x%s\n", ctrl, buf);
}
#endif
show_extra_register_data(regs, 128);
}
/* Print a task banner, the full register state and a backtrace. */
void show_regs(struct pt_regs * regs)
{
printk("\n");
printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
__show_regs(regs);
__backtrace();
}
/* Notifier chain fired on thread lifecycle events (flush/exit/copy). */
ATOMIC_NOTIFIER_HEAD(thread_notify_head);
EXPORT_SYMBOL_GPL(thread_notify_head);
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}
/* Clear the current thread's coprocessor-usage, debug and FP state. */
void flush_thread(void)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
flush_ptrace_hw_breakpoint(tsk);
memset(thread->used_cp, 0, sizeof(thread->used_cp));
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
memset(&thread->fpstate, 0, sizeof(union fp_state));
thread_notify(THREAD_NOTIFY_FLUSH, thread);
}
/* Nothing to release: ARM keeps no extra per-task state here. */
void release_thread(struct task_struct *dead_task)
{
}
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
/*
 * Set up the child's register frame and saved CPU context so that it
 * resumes in ret_from_fork with r0 == 0 and the requested stack
 * pointer, and install the TLS value when CLONE_SETTLS is given.
 */
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
*childregs = *regs;
/* child observes 0 as the return value of the clone */
childregs->ARM_r0 = 0;
childregs->ARM_sp = stack_start;
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
thread->cpu_context.sp = (unsigned long)childregs;
thread->cpu_context.pc = (unsigned long)ret_from_fork;
clear_ptrace_hw_breakpoint(p);
if (clone_flags & CLONE_SETTLS)
thread->tp_value = regs->ARM_r3;
thread_notify(THREAD_NOTIFY_COPY, thread);
return 0;
}
/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
elf_core_copy_regs(elfregs, task_pt_regs(t));
return 1;
}
/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
struct thread_info *thread = current_thread_info();
/* the cp1/cp2 usage flags double as the "has FP state" test */
int used_math = thread->used_cp[1] | thread->used_cp[2];
if (used_math)
memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);
/*
* Shuffle the argument into the correct register before calling the
* thread function. r4 is the thread argument, r5 is the pointer to
* the thread function, and r6 points to the exit function.
*/
extern void kernel_thread_helper(void);
/* Trampoline executed first in a new kernel thread: installs the PSR
* from r7, moves the argument into r0, sets lr to the exit thunk and
* jumps to the thread function in r5 (see the comment above). */
asm( ".pushsection .text\n"
" .align\n"
" .type kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
#ifdef CONFIG_TRACE_IRQFLAGS
" bl trace_hardirqs_on\n"
#endif
" msr cpsr_c, r7\n"
" mov r0, r4\n"
" mov lr, r6\n"
" mov pc, r5\n"
" .size kernel_thread_helper, . - kernel_thread_helper\n"
" .popsection");
#ifdef CONFIG_ARM_UNWIND
/* .cantunwind thunk so the unwinder stops cleanly at thread exit. */
extern void kernel_thread_exit(long code);
asm( ".pushsection .text\n"
" .align\n"
" .type kernel_thread_exit, #function\n"
"kernel_thread_exit:\n"
" .fnstart\n"
" .cantunwind\n"
" bl do_exit\n"
" nop\n"
" .fnend\n"
" .size kernel_thread_exit, . - kernel_thread_exit\n"
" .popsection");
#else
#define kernel_thread_exit do_exit
#endif
/*
* Create a kernel thread.
*/
/*
 * Create a kernel thread.
 *
 * Builds a pt_regs frame that starts the new task in
 * kernel_thread_helper() (SVC mode, IRQs initially disabled) with the
 * thread function, its argument and the exit thunk staged in r4-r6,
 * then clones it via do_fork().  Returns the new pid or a negative
 * errno from do_fork().
 *
 * Fix: the address-of operator in two lines had been corrupted into
 * the HTML entity sequence "®s" ("&reg;s"); restored to "&regs" so
 * the file compiles again.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.ARM_r4 = (unsigned long)arg;
regs.ARM_r5 = (unsigned long)fn;
regs.ARM_r6 = (unsigned long)kernel_thread_exit;
/* r7 holds the PSR value kernel_thread_helper installs via msr */
regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
regs.ARM_pc = (unsigned long)kernel_thread_helper;
regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
/*
 * Return the PC at which a sleeping task is blocked, skipping
 * scheduler-internal functions; 0 if it cannot be determined.  Walks
 * at most 16 saved stack frames and only for tasks that are not
 * currently running.
 */
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(p);
do {
int ret = unwind_frame(&frame);
if (ret < 0)
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
} while (count ++ < 16);
return 0;
}
/*
 * Randomize the program break: pick a random page-aligned address in
 * the 32MB window above the current brk, falling back to the current
 * brk when randomize_range() yields 0.
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long limit = base + 0x02000000;
	unsigned long picked = randomize_range(base, limit, 0);

	if (picked)
		return picked;
	return base;
}
#ifdef CONFIG_MMU
/*
* The vectors page is always readable from user space for the
* atomic helpers and the signal restart code. Let's declare a mapping
* for it so it is visible through ptrace and /proc/<pid>/mem.
*/
/*
 * Install a special mapping for the vectors page at 0xffff0000 in the
 * current process so it shows up via ptrace and /proc/<pid>/mem (see
 * the comment above).  Returns 0 or a negative errno.
 */
int vectors_user_mapping(void)
{
struct mm_struct *mm = current->mm;
return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYEXEC |
VM_ALWAYSDUMP | VM_RESERVED,
NULL);
}
/* Name shown in /proc/<pid>/maps for the vectors page VMA. */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_start == 0xffff0000)
		return "[vectors]";
	return NULL;
}
#endif
| gpl-2.0 |
NandanPhadke/oslab | net/ipv4/netfilter/iptable_mangle.c | 12 | 7213 | /*
* This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x.
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Extended to all five netfilter hooks by Brad Chapman & Harald Welte
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/route.h>
#include <linux/ip.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("iptables mangle table");
/* Bitmask of all five netfilter hooks the mangle table attaches to. */
#define MANGLE_VALID_HOOKS ((1 << NF_IP_PRE_ROUTING) | \
(1 << NF_IP_LOCAL_IN) | \
(1 << NF_IP_FORWARD) | \
(1 << NF_IP_LOCAL_OUT) | \
(1 << NF_IP_POST_ROUTING))
/* Standard entry. */
struct ipt_standard
{
struct ipt_entry entry;
struct ipt_standard_target target;
};
/* Target used by the table-terminating ERROR rule. */
struct ipt_error_target
{
struct ipt_entry_target target;
char errorname[IPT_FUNCTION_MAXNAMELEN];
};
/* The ERROR rule itself: an entry plus its error target. */
struct ipt_error
{
struct ipt_entry entry;
struct ipt_error_target target;
};
/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
/*
 * Built-in initial ruleset for the mangle table: one unconditional
 * ACCEPT rule per hook plus the terminating ERROR entry, equivalent to
 * an empty user-visible table.  The two offset arrays are the hook
 * entry points and underflow (policy) positions respectively.
 */
static struct
{
struct ipt_replace repl;
struct ipt_standard entries[5];
struct ipt_error term;
} initial_table __initdata
= { { "mangle", MANGLE_VALID_HOOKS, 6,
sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
{ [NF_IP_PRE_ROUTING] = 0,
[NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
[NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 },
{ [NF_IP_PRE_ROUTING] = 0,
[NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
[NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 },
0, NULL, { } },
{
/* PRE_ROUTING */
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ipt_entry),
sizeof(struct ipt_standard),
0, { 0, 0 }, { } },
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* LOCAL_IN */
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ipt_entry),
sizeof(struct ipt_standard),
0, { 0, 0 }, { } },
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* FORWARD */
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ipt_entry),
sizeof(struct ipt_standard),
0, { 0, 0 }, { } },
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* LOCAL_OUT */
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ipt_entry),
sizeof(struct ipt_standard),
0, { 0, 0 }, { } },
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* POST_ROUTING */
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ipt_entry),
sizeof(struct ipt_standard),
0, { 0, 0 }, { } },
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
},
/* ERROR */
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ipt_entry),
sizeof(struct ipt_error),
0, { 0, 0 }, { } },
{ { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
{ } },
"ERROR"
}
}
};
/* The "mangle" table descriptor, seeded from initial_table above. */
static struct ipt_table packet_mangler = {
.name = "mangle",
.table = &initial_table.repl,
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.me = THIS_MODULE,
};
/* The work comes in here from netfilter.c. */
/* Hook used on every chain except LOCAL_OUT: just run the table,
* no re-routing is required afterwards. */
static unsigned int
ipt_route_hook(unsigned int hook,
struct sk_buff **pskb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(pskb, hook, in, out, &packet_mangler, NULL);
}
/*
 * LOCAL_OUT hook: run the mangle table, then re-route the packet if
 * the rules changed anything routing depends on (source/destination
 * address, TOS, or - when fwmark routing is configured - the nfmark).
 */
static unsigned int
ipt_local_hook(unsigned int hook,
struct sk_buff **pskb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
unsigned int ret;
u_int8_t tos;
u_int32_t saddr, daddr;
unsigned long nfmark;
/* root is playing with raw sockets. */
if ((*pskb)->len < sizeof(struct iphdr)
|| (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
if (net_ratelimit())
printk("ipt_hook: happy cracking.\n");
return NF_ACCEPT;
}
/* Save things which could affect route */
nfmark = (*pskb)->nfmark;
saddr = (*pskb)->nh.iph->saddr;
daddr = (*pskb)->nh.iph->daddr;
tos = (*pskb)->nh.iph->tos;
ret = ipt_do_table(pskb, hook, in, out, &packet_mangler, NULL);
/* Reroute for ANY change. */
if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE
&& ((*pskb)->nh.iph->saddr != saddr
|| (*pskb)->nh.iph->daddr != daddr
#ifdef CONFIG_IP_ROUTE_FWMARK
|| (*pskb)->nfmark != nfmark
#endif
|| (*pskb)->nh.iph->tos != tos))
/* drop the packet if a new route cannot be found */
return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
return ret;
}
/* One hook registration per chain; LOCAL_OUT uses the re-routing
* variant, all other chains use the plain table hook. */
static struct nf_hook_ops ipt_ops[] = {
{
.hook = ipt_route_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_IP_PRE_ROUTING,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_IP_LOCAL_IN,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_IP_FORWARD,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_local_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_IP_LOCAL_OUT,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_IP_POST_ROUTING,
.priority = NF_IP_PRI_MANGLE,
},
};
/*
 * Module load: register the mangle table, then attach each netfilter
 * hook in ipt_ops[].  On any failure, unregister the hooks already
 * attached (in reverse order) and drop the table again, mirroring the
 * teardown in fini().
 *
 * Cleanup: the original five-step goto ladder (cleanup_hook3..0) was
 * copy-paste-prone; it is replaced by the same loop idiom fini() uses,
 * with identical registration order and unwind behavior.
 */
static int __init init(void)
{
	int ret;
	unsigned int i;

	/* Register table */
	ret = ipt_register_table(&packet_mangler);
	if (ret < 0)
		return ret;

	/* Register hooks, unwinding on the first failure */
	for (i = 0; i < sizeof(ipt_ops)/sizeof(struct nf_hook_ops); i++) {
		ret = nf_register_hook(&ipt_ops[i]);
		if (ret < 0)
			goto cleanup_hooks;
	}
	return 0;

cleanup_hooks:
	while (i-- > 0)
		nf_unregister_hook(&ipt_ops[i]);
	ipt_unregister_table(&packet_mangler);
	return ret;
}
/* Module unload: detach every hook, then unregister the table. */
static void __exit fini(void)
{
unsigned int i;
for (i = 0; i < sizeof(ipt_ops)/sizeof(struct nf_hook_ops); i++)
nf_unregister_hook(&ipt_ops[i]);
ipt_unregister_table(&packet_mangler);
}
module_init(init);
module_exit(fini);
| gpl-2.0 |
ft-/OX820-uboot-2011.09 | arch/powerpc/lib/cache.c | 268 | 1672 | /*
* (C) Copyright 2002
* Wolfgang Denk, DENX Software Engineering, wd@denx.de.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <asm/cache.h>
#include <watchdog.h>
/*
 * Flush the data cache and invalidate the instruction cache for
 * [start_addr, start_addr + size), cache-line by cache-line.
 * No-op on 5xx parts (no cache control needed there, per the #ifndef).
 */
void flush_cache(ulong start_addr, ulong size)
{
#ifndef CONFIG_5xx
	ulong addr, start, end;

	/* Round the start down to a cache-line boundary. */
	start = start_addr & ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	end = start_addr + size - 1;

	/*
	 * dcbst: push dirty data-cache lines to memory.
	 * The (addr >= start) term stops the loop if addr wraps at the
	 * top of the address space.
	 */
	for (addr = start; (addr <= end) && (addr >= start);
			addr += CONFIG_SYS_CACHELINE_SIZE) {
		asm volatile("dcbst 0,%0" : : "r" (addr) : "memory");
		WATCHDOG_RESET();
	}
	/* wait for all dcbst to complete on bus */
	asm volatile("sync" : : : "memory");

	/* icbi: invalidate the corresponding instruction-cache lines. */
	for (addr = start; (addr <= end) && (addr >= start);
			addr += CONFIG_SYS_CACHELINE_SIZE) {
		asm volatile("icbi 0,%0" : : "r" (addr) : "memory");
		WATCHDOG_RESET();
	}
	asm volatile("sync" : : : "memory");
	/* flush prefetch queue */
	asm volatile("isync" : : : "memory");
#endif
}
| gpl-2.0 |
jdlfg/Mecha-kernel | mm/memory_hotplug.c | 268 | 21679 | /*
* linux/mm/memory_hotplug.c
*
* Copyright (C)
*/
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <asm/tlbflush.h>
#include "internal.h"
/*
 * Add the hot-plugged range to the iomem resource tree as "System RAM".
 * Returns the new resource, or NULL on allocation failure or collision
 * with an existing resource (caller treats NULL as failure either way).
 */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		/* Report failure to the caller instead of BUG_ON(): hot-add
		 * can simply fail; crashing the box is never necessary here.
		 * Note the caller maps NULL to -EEXIST, not -ENOMEM. */
		return NULL;

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		/* KERN_ERR: the original printk had no log level. */
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}
/*
 * Release and free a resource obtained from register_memory_resource().
 * Accepts NULL for convenience on error paths.
 */
static void release_memory_resource(struct resource *res)
{
	if (res) {
		release_resource(res);
		kfree(res);
	}
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Tag a bootmem-allocated page so it can be identified (and freed
 * correctly) at hot-remove time: the type goes in _mapcount, the
 * owner info in page->private, and a reference is taken.
 */
static void get_page_bootmem(unsigned long info, struct page *page, int type)
{
	atomic_set(&page->_mapcount, type);
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/*
 * Drop a reference taken by get_page_bootmem(); when the last one goes,
 * clear the bootmem markers and hand the page back to the buddy
 * allocator via __free_pages_bootmem().
 */
void put_page_bootmem(struct page *page)
{
	int type;
	type = atomic_read(&page->_mapcount);
	/* _mapcount must hold a bootmem type value (< -1 by convention). */
	BUG_ON(type >= -1);
	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		reset_page_mapcount(page);
		__free_pages_bootmem(page, 0);
	}
}

/*
 * Mark the pages backing one section's memmap and usemap as
 * bootmem-info pages, so they are accounted to the section.
 */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	if (!pfn_valid(start_pfn))
		return;
	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);
	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);
	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);
	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

/*
 * Walk one node and tag every bootmem-backed management structure
 * (the pgdat itself, each zone's wait table, and every section's
 * memmap/usemap) so hot-remove can account for them.
 */
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;
	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);
	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);
	zone = &pgdat->node_zones[0];
	/* NOTE(review): loop stops at MAX_NR_ZONES - 1, i.e. the last
	 * zone is skipped — presumably ZONE_MOVABLE; confirm intent. */
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);
			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}
	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;
	/* register_section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
/*
 * Extend a zone's [zone_start_pfn, zone_start_pfn + spanned_pages)
 * span to also cover [start_pfn, end_pfn).  Takes the zone span
 * seqlock so readers see a consistent start/size pair.
 */
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;
	zone_span_writelock(zone);
	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;
	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;
	zone_span_writeunlock(zone);
}

/*
 * Same as grow_zone_span() but for the node (pgdat) span.  Caller must
 * hold the pgdat resize lock — see __add_zone() below.
 */
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;
	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;
	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

/*
 * Grow @zone (and its node) to cover one new section starting at
 * @phys_start_pfn, initializing the zone first if it was empty, then
 * initialize the new section's memmap.  Returns 0 or a negative errno.
 */
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;
	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		/* First pages in this zone: set up wait table etc. */
		int ret;
		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	/* Resize lock covers both the zone and node span updates. */
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}
/*
 * Hot-add one memory section at @phys_start_pfn: allocate its sparse
 * memmap, grow the zone over it, then create the sysfs memory block.
 * Returns -EEXIST if the section already has valid pages.
 */
static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;
	if (pfn_valid(phys_start_pfn))
		return -EEXIST;
	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;
	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;
	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * XXX: Freeing memmap with vmemmap is not implement yet.
	 * This should be removed later.
	 */
	return -EBUSY;
}
#else
/*
 * Hot-remove one section: tear down its sysfs entry, then drop the
 * sparse memmap under the pgdat resize lock.
 */
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;
	if (!valid_section(ms))
		return ret;
	ret = unregister_memory_section(ms);
	if (ret)
		return ret;
	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif
#endif
/*
* Reasonably generic function for adding memory. It is
* expected that archs that support memory hotplug will
* call this function after deciding the zone to which to
* add the new pages.
*/
/**
 * __add_pages() - add sections of pages to a zone
 * @nid: node the range belongs to
 * @zone: zone the pages are added to
 * @phys_start_pfn: first pfn of the range
 * @nr_pages: number of pages (range is rounded out to whole sections)
 *
 * Adds the range section by section; -EEXIST from an already-present
 * section is tolerated, any other error aborts the walk.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* during initialize mem_map, align hot-added range to section */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
		/*
		 * EEXIST is finally dealt with by ioresource collision
		 * check. see add_memory() => register_memory_resource()
		 * Warning will be printed if there is collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}
	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjust properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;
	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);
	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		/* Drop the "System RAM" iomem claim before the section. */
		release_mem_region(pfn << PAGE_SHIFT,
				   PAGES_PER_SECTION << PAGE_SHIFT);
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
EXPORT_SYMBOL_GPL(__remove_pages);
/*
 * Make a single hot-added page usable: update the global page
 * accounting, clear its Reserved flag, and hand it to the buddy
 * allocator via __free_page().
 */
void online_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	totalram_pages++;
	/* Track the highest pfn ever seen. */
	if (pfn >= num_physpages)
		num_physpages = pfn + 1;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = max(page_to_pfn(page), max_mapnr);
#endif
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
/*
 * walk_system_ram_range() callback: online every page in the range and
 * accumulate the number of onlined pages into *arg.  The range is only
 * processed if its first page is still Reserved (i.e. not yet onlined).
 */
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long *onlined = arg;
	unsigned long pfn;

	if (PageReserved(pfn_to_page(start_pfn))) {
		for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
			online_page(pfn_to_page(pfn));
			(*onlined)++;
		}
	}
	return 0;
}
/*
 * Bring [pfn, pfn + nr_pages) online: notify MEM_GOING_ONLINE
 * listeners, free the pages into the buddy allocator, fix up zone/node
 * accounting and watermarks, rebuild zonelists if the zone was empty,
 * and finally send MEM_ONLINE.  Returns 0 or a negative errno.
 */
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;
	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	nid = page_to_nid(pfn_to_page(pfn));
	/* Node had no pages yet: this online changes its status. */
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;
	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;
	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
			nr_pages, pfn);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	/* Recompute per-cpu batches, watermarks and reclaim ratios. */
	zone_pcp_update(zone);
	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}
	if (need_zonelists_rebuild)
		build_all_zonelists();
	else
		vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();
	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
/*
 * Allocate and minimally initialize a pgdat for a node that is coming
 * online via memory hot-add.  All zones start empty; actual pages are
 * added later by __add_pages().  Returns NULL on allocation failure.
 */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;
	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;
	arch_refresh_nodedata(nid, pgdat);
	/* we can use NODE_DATA(nid) from here */
	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
	return pgdat;
}
/* Undo hotadd_new_pgdat(): detach the node data, then free the pgdat. */
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
/*
 * Hot-add the physical range [start, start + size) to node @nid:
 * claim the iomem resource, allocate a pgdat if the node is new, call
 * the arch hot-add hook, and mark the node online.  Once the node is
 * online there is no rollback path (see the BUG_ON below).
 */
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;
	/* Block suspend while topology is in flux. */
	lock_system_sleep();
	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;
	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto out;
		new_pgdat = 1;
	}
	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);
	if (ret < 0)
		goto error;
	/* we online node here. we can't roll back from here. */
	node_set_online(nid);
	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If sysfs file of new node can't create, cpu on the node
		 * can't be hot-added. There is no rollback way now.
		 * So, check by BUG_ON() to catch it reluctantly..
		 */
		BUG_ON(ret);
	}
	goto out;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);
out:
	unlock_system_sleep();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
EXPORT_SYMBOL_GPL(add_memory);
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy contraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock
 */
static inline int pageblock_free(struct page *page)
{
	/* NOTE(review): page_order() is read without zone->lock here, so
	 * the result is only a hint — callers treat it as best-effort. */
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}
/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
int pageblocks_stride;
/* Ensure the starting page is pageblock-aligned */
BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
/* Move forward by at least 1 * pageblock_nr_pages */
pageblocks_stride = 1;
/* If the entire pageblock is free, move to the end of free page */
if (pageblock_free(page))
pageblocks_stride += page_order(page) - pageblock_order;
return page + (pageblocks_stride * pageblock_nr_pages);
}
/*
 * Checks if this range of memory is likely to be hot-removable: every
 * pageblock must be either entirely free or of MOVABLE migrate type,
 * and must not start with a Reserved page.  Returns 1 if removable-
 * looking, 0 otherwise.  This is only a heuristic — no locks are held.
 */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	int type;
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;
	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		type = get_pageblock_migratetype(page);
		/*
		 * A pageblock containing MOVABLE or free pages is considered
		 * removable
		 */
		if (type != MIGRATE_MOVABLE && !pageblock_free(page))
			return 0;
		/*
		 * A pageblock starting with a PageReserved page is not
		 * considered removable.
		 */
		if (PageReserved(page))
			return 0;
	}
	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}
/*
 * Confirm all pages in a range [start, end) belong to the same zone.
 * Returns 1 if they do, 0 on the first zone mismatch.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	/* One probe per MAX_ORDER block is enough: zones are aligned. */
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfn is much easier than scanning lru list.
 * Scan pfn from start to end and Find LRU page.
 *
 * Returns the pfn of the first LRU page found, or 0 if none.
 * NOTE(review): the return type is int while a pfn is unsigned long;
 * on 64-bit with very high pfns this truncates — upstream later changed
 * this to return unsigned long.  Kept as-is here since the declaration
 * is part of the visible interface.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}
/*
 * Migration target allocator for hot-remove: any movable user page
 * outside the range being offlined will do.
 */
static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This should be improooooved!! */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}
/* Max pages isolated per pass, to bound latency. */
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
/*
 * Migrate up to NR_OFFLINE_AT_ONCE_PAGES used pages out of
 * [start_pfn, end_pfn): isolate each LRU page, then hand the list to
 * migrate_pages().  Returns 0 on success, the number of failed pages
 * from migrate_pages(), or -EBUSY if non-LRU in-use pages were found.
 */
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);
	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			list_add_tail(&page->lru, &source);
			move_pages--;
		} else {
			/* Becasue we don't have big zone->lock. we should
			   check this again here. */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_INFO "removing from LRU failed"
					 " %lx/%d/%lx\n",
				pfn, page_count(page), page->flags);
#endif
		}
	}
	ret = -EBUSY;
	if (not_managed) {
		/* Unmovable in-use pages present: give everything back. */
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* this function returns # of failed pages */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);
out:
	return ret;
}
/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

/* Apply offline_isolated_pages_cb() to every RAM range in the span. */
static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}
/*
 * Check all pages in range, recorded as memory resource, are isolated.
 * walk_system_ram_range() callback: on success, add nr_pages to the
 * running total pointed at by @data; on failure, return the error.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;

	/*
	 * The old code read *data into a local and then immediately
	 * overwrote it with nr_pages — a dead, confusing initializer.
	 * Just accumulate nr_pages directly on success.
	 */
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	if (!ret)
		*(long *)data += nr_pages;
	return ret;
}
/*
 * Count how many pages in [start_pfn, end_pfn) are isolated, walking
 * only the ranges that are actually System RAM.  Returns the count, or
 * a negative errno (as long) if some page was not isolated.
 */
static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;
	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}
/*
 * Offline the pageblock-aligned range [start_pfn, end_pfn): isolate it,
 * notify MEM_GOING_OFFLINE, migrate all used pages away (retrying until
 * @timeout jiffies elapse), then pull the now-free pages out of the
 * buddy allocator and fix up zone/node accounting.  Returns 0 on
 * success or a negative errno, rolling the isolation back on failure.
 */
int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;
	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier...and readable.
	   we assume this for now. .*/
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;
	lock_system_sleep();
	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;
	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		goto out;
	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	/* Offlining everything the node has: its status changes. */
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;
	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;
	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		/* Flush per-cpu LRU pagevecs and pcp lists before rescanning. */
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}
	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have page on LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			/* Hard failure eats one of retry_max attempts. */
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zone's lru pagevec, this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain pcp pages, this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* Ok, all of our target is isolated.
	   We cannot do rollback at this point. */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and makes migrate type to be MOVABLE */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;
	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();
	memory_notify(MEM_OFFLINE, &arg);
	unlock_system_sleep();
	return 0;
failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn);
out:
	unlock_system_sleep();
	return ret;
}
/*
 * Hot-remove the physical range [start, start + size): converts to pfns
 * and offlines the pages with a 120-second migration timeout.
 */
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);
	return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
#else
/* Hot-remove support not configured: always refuse. */
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);
| gpl-2.0 |
googyanas/Googy-Max-N4-Kernel | arch/arm/plat-samsung/cpu.c | 268 | 1391 | /* linux/arch/arm/plat-samsung/cpu.c
*
* Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung CPU Support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <mach/map.h>
#include <plat/cpu.h>
/* SoC identification state, filled in by the per-family init hooks below. */
unsigned long samsung_cpu_id;
static unsigned int samsung_cpu_rev;
unsigned int samsung_chip_id[2];

/* Return the cached CPU revision (valid after an init hook has run). */
unsigned int samsung_rev(void)
{
	return samsung_cpu_rev;
}
EXPORT_SYMBOL(samsung_rev);

void __init s3c24xx_init_cpu(void)
{
	/* nothing here yet */
	samsung_cpu_rev = 0;
}

/* Read the S3C64xx chip ID register; revision is not decoded here. */
void __init s3c64xx_init_cpu(void)
{
	samsung_cpu_id = __raw_readl(S3C_VA_SYS + 0x118);
	if (!samsung_cpu_id) {
		/*
		 * S3C6400 has the ID register in a different place,
		 * and needs a write before it can be read.
		 */
		__raw_writel(0x0, S3C_VA_SYS + 0xA1C);
		samsung_cpu_id = __raw_readl(S3C_VA_SYS + 0xA1C);
	}
	samsung_cpu_rev = 0;
}

/* Latch the S5P CPU id, revision (low byte) and both chip-id words. */
void __init s5p_init_cpu(void __iomem *cpuid_addr)
{
	samsung_cpu_id = __raw_readl(cpuid_addr);
	samsung_cpu_rev = samsung_cpu_id & 0xFF;
	samsung_chip_id[0] = __raw_readl(cpuid_addr + CHIPID0_OFFSET);
	samsung_chip_id[1] = __raw_readl(cpuid_addr + CHIPID1_OFFSET) & 0xFFFF;
}
| gpl-2.0 |
TimesysGit/advantech-linux | mm/readahead.c | 524 | 16239 | /*
* mm/readahead.c - address_space-level file readahead.
*
* Copyright (C) 2002, Linus Torvalds
*
* 09Apr2002 Andrew Morton
* Initial version.
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
/*
* Initialise a struct file's readahead state. Assumes that the caller has
* memset *ra to zero.
*/
/*
 * Initialise a struct file's readahead state from the backing device's
 * default window size.  Assumes that the caller has memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;	/* no previous read yet */
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
/*
* see if a page needs releasing upon read_cache_pages() failure
* - the caller of read_cache_pages() may have set PG_private or PG_fscache
* before calling, such as the NFS fs marking pages that are cached locally
* on disk, thus we need to give the fs a chance to clean up in the event of
* an error
*/
/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		/* Page is not yet visible, so the lock must be free. */
		if (!trylock_page(page))
			BUG();
		/* do_invalidatepage() needs page->mapping set temporarily. */
		page->mapping = mapping;
		do_invalidatepage(page, 0);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}
/*
* release a list of pages, invalidating them first if need be
*/
/*
 * Release every page remaining on @pages, giving the filesystem a
 * chance to invalidate per-page private state first.
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim = list_to_page(pages);

		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}
/**
* read_cache_pages - populate an address space with some pages & start reads against them
* @mapping: the address_space
* @pages: The address of a list_head which contains the target pages. These
* pages have their ->index populated and are otherwise uninitialised.
* @filler: callback routine for filling a single page.
* @data: private data for the callback routine.
*
* Hides the details of the LRU cache etc from the filesystems.
*/
/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 * Returns 0, or the first non-zero value returned by @filler (in which
 * case the remaining pages are invalidated and released).
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;
	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		/* Already cached: drop our copy and move on. */
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		/* Page cache now holds its own reference. */
		page_cache_release(page);
		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}
EXPORT_SYMBOL(read_cache_pages);
EXPORT_SYMBOL(read_cache_pages);
/*
 * Submit reads for a batch of readahead pages: use the filesystem's
 * ->readpages() if it has one, otherwise add each page to the page
 * cache and kick ->readpage() on it individually.  All I/O is issued
 * under one block plug.
 */
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;
	blk_start_plug(&plug);
	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		/* Skip pages that raced into the cache meanwhile. */
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;
out:
	blk_finish_plug(&plug);
	return ret;
}
/*
* __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
* the pages first, then submits them all for I/O. This avoids the very bad
* behaviour which would occur if page allocations are causing VM writeback.
* We really don't want to intermingle reads and writes like that.
*
* Returns the number of pages requested, or the maximum amount of I/O allowed.
*/
/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);
	if (isize == 0)
		goto out;
	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		if (page_offset > end_index)
			break;
		/* Skip offsets that are already cached. */
		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;
		page = page_cache_alloc_readahead(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		/* Mark the async-readahead trigger page. */
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}
	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
/*
* Chunk the readahead into 2 megabyte units, so that we don't pin too much
* memory at once.
*/
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.  Returns the total number of pages read, or a negative
 * errno from the first failing chunk.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;
	/* No way to read pages in: nothing sensible to do. */
	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;
	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
/*
* Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
* sensible upper limit.
*/
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit: half of this node's inactive-file plus free pages.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
/*
* Submit IO for the read-ahead request in file_ra_state.
*/
/*
 * Submit IO for the read-ahead window described by @ra.
 * Returns the number of pages actually submitted.
 */
unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					 ra->start, ra->size, ra->async_size);
}
/*
* Set the initial window size, round to next power of 2 and square
* for small size, x 4 for medium, and x 2 for large
* for 128k (32 page) max ra
* 1-8 page = 32k initial, > 8 page = 128k initial
*/
/*
 * Set the initial window size: round the request up to a power of two,
 * then scale it — x4 for small requests, x2 for medium, capped at @max.
 * (For a 128k/32-page max: 1-8 page reads start at 32k, larger at 128k.)
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long ra = roundup_pow_of_two(size);

	if (ra <= max / 32)
		return ra * 4;
	if (ra <= max / 4)
		return ra * 2;
	return max;
}
/*
* Get the previous window size, ramp it up, and
* return it as the new window size.
*/
/*
 * Ramp up the previous readahead window and return it as the new size:
 * quadruple small windows, double larger ones, never exceeding @max.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long old = ra->size;
	unsigned long grown = (old < max / 16) ? 4 * old : 2 * old;

	return min(grown, max);
}
/*
* On-demand readahead design.
*
* The fields in struct file_ra_state represent the most-recently-executed
* readahead attempt:
*
* |<----- async_size ---------|
* |------------------- size -------------------->|
* |==================#===========================|
* ^start ^page marked with PG_readahead
*
* To overlap application thinking time and disk I/O time, we do
* `readahead pipelining': Do not wait until the application consumed all
* readahead pages and stalled on the missing page at readahead_index;
* Instead, submit an asynchronous readahead I/O as soon as there are
* only async_size pages left in the readahead window. Normally async_size
* will be equal to size, for maximum pipelining.
*
* In interleaved sequential reads, concurrent streams on the same fd can
* be invalidating each other's readahead state. So we flag the new readahead
* page at (start+size-async_size) with PG_readahead, and use it as readahead
* indicator. The flag won't be set on already cached pages, to avoid the
* readahead-for-nothing fuss, saving pointless page cache lookups.
*
* prev_pos tracks the last visited byte in the _previous_ read request.
* It should be maintained by the caller, and will be used for detecting
* small random reads. Note that the readahead algorithm checks loosely
* for sequential patterns. Hence interleaved reads might be served as
* sequential ones.
*
* There is a special-case: if the first page which the application tries to
* read happens to be the first page of the file, it is assumed that a linear
* read is about to happen and the window is immediately set to the initial size
* based on I/O request size and the max_readahead.
*
* The code ramps up the readahead size aggressively at first, but slow down as
* it approaches max_readhead.
*/
/*
* Count contiguously cached pages from @offset-1 to @offset-@max,
* this count is a conservative estimation of
* - length of the sequential read sequence, or
* - thrashing threshold in memory tight systems
*/
static pgoff_t count_history_pages(struct address_space *mapping,
struct file_ra_state *ra,
pgoff_t offset, unsigned long max)
{
pgoff_t head;
rcu_read_lock();
head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
rcu_read_unlock();
return offset - 1 - head;
}
/*
* page cache context based read-ahead
*/
static int try_context_readahead(struct address_space *mapping,
struct file_ra_state *ra,
pgoff_t offset,
unsigned long req_size,
unsigned long max)
{
pgoff_t size;
size = count_history_pages(mapping, ra, offset, max);
/*
* no history pages:
* it could be a random read
*/
if (!size)
return 0;
/*
* starts from beginning of file:
* it is a strong indication of long-run stream (or whole-file-read)
*/
if (size >= offset)
size *= 2;
ra->start = offset;
ra->size = get_init_ra_size(size + req_size, max);
ra->async_size = ra->size;
return 1;
}
/*
* A minimal readahead algorithm for trivial sequential/random reads.
*/
static unsigned long
ondemand_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
bool hit_readahead_marker, pgoff_t offset,
unsigned long req_size)
{
unsigned long max = max_sane_readahead(ra->ra_pages);
/*
* start of file
*/
if (!offset)
goto initial_readahead;
/*
* It's the expected callback offset, assume sequential access.
* Ramp up sizes, and push forward the readahead window.
*/
if ((offset == (ra->start + ra->size - ra->async_size) ||
offset == (ra->start + ra->size))) {
ra->start += ra->size;
ra->size = get_next_ra_size(ra, max);
ra->async_size = ra->size;
goto readit;
}
/*
* Hit a marked page without valid readahead state.
* E.g. interleaved reads.
* Query the pagecache for async_size, which normally equals to
* readahead size. Ramp it up and use it as the new readahead size.
*/
if (hit_readahead_marker) {
pgoff_t start;
rcu_read_lock();
start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
rcu_read_unlock();
if (!start || start - offset > max)
return 0;
ra->start = start;
ra->size = start - offset; /* old async_size */
ra->size += req_size;
ra->size = get_next_ra_size(ra, max);
ra->async_size = ra->size;
goto readit;
}
/*
* oversize read
*/
if (req_size > max)
goto initial_readahead;
/*
* sequential cache miss
*/
if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
goto initial_readahead;
/*
* Query the page cache and look for the traces(cached history pages)
* that a sequential stream would leave behind.
*/
if (try_context_readahead(mapping, ra, offset, req_size, max))
goto readit;
/*
* standalone, small random read
* Read as is, and do not pollute the readahead state.
*/
return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
initial_readahead:
ra->start = offset;
ra->size = get_init_ra_size(req_size, max);
ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
readit:
/*
* Will this read hit the readahead marker made by itself?
* If so, trigger the readahead marker hit now, and merge
* the resulted next readahead window into the current one.
*/
if (offset == ra->start && ra->size == ra->async_size) {
ra->async_size = get_next_ra_size(ra, max);
ra->size += ra->async_size;
}
return ra_submit(ra, mapping, filp);
}
/**
* page_cache_sync_readahead - generic file readahead
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @filp: passed on to ->readpage() and ->readpages()
* @offset: start offset into @mapping, in pagecache page-sized units
* @req_size: hint: total size of the read which the caller is performing in
* pagecache pages
*
* page_cache_sync_readahead() should be called when a cache miss happened:
* it will submit the read. The readahead logic may decide to piggyback more
* pages onto the read request if access patterns suggest it will improve
* performance.
*/
void page_cache_sync_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
pgoff_t offset, unsigned long req_size)
{
/* no read-ahead */
if (!ra->ra_pages)
return;
/* be dumb */
if (filp && (filp->f_mode & FMODE_RANDOM)) {
force_page_cache_readahead(mapping, filp, offset, req_size);
return;
}
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
/**
* page_cache_async_readahead - file readahead for marked pages
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @filp: passed on to ->readpage() and ->readpages()
* @page: the page at @offset which has the PG_readahead flag set
* @offset: start offset into @mapping, in pagecache page-sized units
* @req_size: hint: total size of the read which the caller is performing in
* pagecache pages
*
* page_cache_async_readahead() should be called when a page is used which
* has the PG_readahead flag; this is a marker to suggest that the application
* has used up enough of the readahead window that we should start pulling in
* more pages.
*/
void
page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
struct page *page, pgoff_t offset,
unsigned long req_size)
{
/* no read-ahead */
if (!ra->ra_pages)
return;
/*
* Same bit is used for PG_readahead and PG_reclaim.
*/
if (PageWriteback(page))
return;
ClearPageReadahead(page);
/*
* Defer asynchronous read-ahead on IO congestion.
*/
if (bdi_read_congested(mapping->backing_dev_info))
return;
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
pgoff_t index, unsigned long nr)
{
if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
return -EINVAL;
force_page_cache_readahead(mapping, filp, index, nr);
return 0;
}
SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
ssize_t ret;
struct fd f;
ret = -EBADF;
f = fdget(fd);
if (f.file) {
if (f.file->f_mode & FMODE_READ) {
struct address_space *mapping = f.file->f_mapping;
pgoff_t start = offset >> PAGE_CACHE_SHIFT;
pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
unsigned long len = end - start + 1;
ret = do_readahead(mapping, f.file, start, len);
}
fdput(f);
}
return ret;
}
| gpl-2.0 |
RaymanFX/kernel_nvidia_tegra | drivers/usb/wusbcore/wa-hc.c | 780 | 2568 | /*
* Wire Adapter Host Controller Driver
* Common items to HWA and DWA based HCDs
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
*/
#include "wusbhc.h"
#include "wa-hc.h"
/**
* Assumes
*
* wa->usb_dev and wa->usb_iface initialized and refcounted,
* wa->wa_descr initialized.
*/
int wa_create(struct wahc *wa, struct usb_interface *iface)
{
int result;
struct device *dev = &iface->dev;
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
/* Fill up Data Transfer EP pointers */
wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
if (wa->xfer_result == NULL)
goto error_xfer_result_alloc;
result = wa_nep_create(wa, iface);
if (result < 0) {
dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
result);
goto error_nep_create;
}
return 0;
error_nep_create:
kfree(wa->xfer_result);
error_xfer_result_alloc:
wa_rpipes_destroy(wa);
error_rpipes_create:
return result;
}
EXPORT_SYMBOL_GPL(wa_create);
void __wa_destroy(struct wahc *wa)
{
if (wa->dti_urb) {
usb_kill_urb(wa->dti_urb);
usb_put_urb(wa->dti_urb);
usb_kill_urb(wa->buf_in_urb);
usb_put_urb(wa->buf_in_urb);
}
kfree(wa->xfer_result);
wa_nep_destroy(wa);
wa_rpipes_destroy(wa);
}
EXPORT_SYMBOL_GPL(__wa_destroy);
/**
* wa_reset_all - reset the WA device
* @wa: the WA to be reset
*
* For HWAs the radio controller and all other PALs are also reset.
*/
void wa_reset_all(struct wahc *wa)
{
/* FIXME: assuming HWA. */
wusbhc_reset_all(wa->wusb);
}
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
MODULE_LICENSE("GPL");
| gpl-2.0 |
OctaviaBlake/kernel-msm | drivers/acpi/glue.c | 1804 | 8722 | /*
* Link physical devices with ACPI devices support
*
* Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com>
* Copyright (c) 2005 Intel Corp.
*
* This file is released under the GPLv2.
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include "internal.h"
#define ACPI_GLUE_DEBUG 0
#if ACPI_GLUE_DEBUG
#define DBG(fmt, ...) \
printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__)
#else
#define DBG(fmt, ...) \
do { \
if (0) \
printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__); \
} while (0)
#endif
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
if (type && type->match && type->find_device) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(register_acpi_bus_type);
int unregister_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return 0;
if (type) {
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "bus type %s unregistered\n",
type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{
struct acpi_bus_type *tmp, *ret = NULL;
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->match(dev)) {
ret = tmp;
break;
}
}
up_read(&bus_type_sem);
return ret;
}
static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **ret_p)
{
struct acpi_device *adev = NULL;
acpi_bus_get_device(handle, &adev);
if (adev) {
*ret_p = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
{
unsigned long long sta;
acpi_status status;
status = acpi_bus_get_status_handle(handle, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return false;
if (is_bridge) {
void *test = NULL;
/* Check if this object has at least one child device. */
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
acpi_dev_present, NULL, NULL, &test);
return !!test;
}
return true;
}
struct find_child_context {
u64 addr;
bool is_bridge;
acpi_handle ret;
bool ret_checked;
};
static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
void *data, void **not_used)
{
struct find_child_context *context = data;
unsigned long long addr;
acpi_status status;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
if (ACPI_FAILURE(status) || addr != context->addr)
return AE_OK;
if (!context->ret) {
/* This is the first matching object. Save its handle. */
context->ret = handle;
return AE_OK;
}
/*
* There is more than one matching object with the same _ADR value.
* That really is unexpected, so we are kind of beyond the scope of the
* spec here. We have to choose which one to return, though.
*
* First, check if the previously found object is good enough and return
* its handle if so. Second, check the same for the object that we've
* just found.
*/
if (!context->ret_checked) {
if (acpi_extra_checks_passed(context->ret, context->is_bridge))
return AE_CTRL_TERMINATE;
else
context->ret_checked = true;
}
if (acpi_extra_checks_passed(handle, context->is_bridge)) {
context->ret = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
{
if (parent) {
struct find_child_context context = {
.addr = addr,
.is_bridge = is_bridge,
};
acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
NULL, &context, NULL);
return context.ret;
}
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_find_child);
static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
int retval = -EINVAL;
if (ACPI_HANDLE(dev)) {
if (handle) {
dev_warn(dev, "ACPI handle is already set\n");
return -EINVAL;
} else {
handle = ACPI_HANDLE(dev);
}
}
if (!handle)
return -EINVAL;
get_device(dev);
status = acpi_bus_get_device(handle, &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
goto err;
}
mutex_lock(&acpi_dev->physical_node_lock);
/* Sanity check. */
list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
if (pn->dev == dev) {
dev_warn(dev, "Already associated with ACPI node\n");
goto err_free;
}
/* allocate physical node id according to physical_node_id_bitmap */
physical_node->node_id =
find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
ACPI_MAX_PHYSICAL_NODE);
if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
retval = -ENOSPC;
goto err_free;
}
set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
physical_node->dev = dev;
list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
acpi_dev->physical_node_count++;
mutex_unlock(&acpi_dev->physical_node_lock);
if (!ACPI_HANDLE(dev))
ACPI_HANDLE_SET(dev, acpi_dev->handle);
if (!physical_node->node_id)
strcpy(physical_node_name, PHYSICAL_NODE_STRING);
else
sprintf(physical_node_name,
"physical_node%d", physical_node->node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
physical_node_name);
retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
"firmware_node");
if (acpi_dev->wakeup.flags.valid)
device_set_wakeup_capable(dev, true);
return 0;
err:
ACPI_HANDLE_SET(dev, NULL);
put_device(dev);
return retval;
err_free:
mutex_unlock(&acpi_dev->physical_node_lock);
kfree(physical_node);
goto err;
}
static int acpi_unbind_one(struct device *dev)
{
struct acpi_device_physical_node *entry;
struct acpi_device *acpi_dev;
acpi_status status;
struct list_head *node, *next;
if (!ACPI_HANDLE(dev))
return 0;
status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
entry = list_entry(node, struct acpi_device_physical_node,
node);
if (entry->dev != dev)
continue;
list_del(node);
clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
acpi_dev->physical_node_count--;
if (!entry->node_id)
strcpy(physical_node_name, PHYSICAL_NODE_STRING);
else
sprintf(physical_node_name,
"physical_node%d", entry->node_id);
sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
ACPI_HANDLE_SET(dev, NULL);
/* acpi_bind_one increase refcnt by one */
put_device(dev);
kfree(entry);
}
mutex_unlock(&acpi_dev->physical_node_lock);
return 0;
err:
dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
return -EINVAL;
}
static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
acpi_handle handle;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret && type) {
ret = type->find_device(dev, &handle);
if (ret) {
DBG("Unable to get handle for %s\n", dev_name(dev));
goto out;
}
ret = acpi_bind_one(dev, handle);
if (ret)
goto out;
}
if (type && type->setup)
type->setup(dev);
out:
#if ACPI_GLUE_DEBUG
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else
DBG("Device %s -> No ACPI support\n", dev_name(dev));
#endif
return ret;
}
static int acpi_platform_notify_remove(struct device *dev)
{
struct acpi_bus_type *type;
type = acpi_get_bus_type(dev);
if (type && type->cleanup)
type->cleanup(dev);
acpi_unbind_one(dev);
return 0;
}
int __init init_acpi_device_notify(void)
{
if (platform_notify || platform_notify_remove) {
printk(KERN_ERR PREFIX "Can't use platform_notify\n");
return 0;
}
platform_notify = acpi_platform_notify;
platform_notify_remove = acpi_platform_notify_remove;
return 0;
}
| gpl-2.0 |
WildfireDEV/android_kernel_samsung_s6 | fs/hpfs/namei.c | 2060 | 16048 | /*
* linux/fs/hpfs/namei.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* adding & removing files & directories
*/
#include <linux/sched.h>
#include "hpfs_fn.h"
static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh0;
struct buffer_head *bh;
struct hpfs_dirent *de;
struct fnode *fnode;
struct dnode *dnode;
struct inode *result;
fnode_secno fno;
dnode_secno dno;
int r;
struct hpfs_dirent dee;
int err;
if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
hpfs_lock(dir->i_sb);
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
if (!fnode)
goto bail;
dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0);
if (!dnode)
goto bail1;
memset(&dee, 0, sizeof dee);
dee.directory = 1;
if (!(mode & 0222)) dee.read_only = 1;
/*dee.archive = 0;*/
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
result = new_inode(dir->i_sb);
if (!result)
goto bail2;
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
hpfs_i(result)->i_dno = dno;
result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
result->i_ctime.tv_nsec = 0;
result->i_mtime.tv_nsec = 0;
result->i_atime.tv_nsec = 0;
hpfs_i(result)->i_ea_size = 0;
result->i_mode |= S_IFDIR;
result->i_op = &hpfs_dir_iops;
result->i_fop = &hpfs_dir_ops;
result->i_blocks = 4;
result->i_size = 2048;
set_nlink(result, 2);
if (dee.read_only)
result->i_mode &= ~0222;
r = hpfs_add_dirent(dir, name, len, &dee);
if (r == 1)
goto bail3;
if (r == -1) {
err = -EEXIST;
goto bail3;
}
fnode->len = len;
memcpy(fnode->name, name, len > 15 ? 15 : len);
fnode->up = cpu_to_le32(dir->i_ino);
fnode->flags |= FNODE_dir;
fnode->btree.n_free_nodes = 7;
fnode->btree.n_used_nodes = 1;
fnode->btree.first_free = cpu_to_le16(0x14);
fnode->u.external[0].disk_secno = cpu_to_le32(dno);
fnode->u.external[0].file_secno = cpu_to_le32(-1);
dnode->root_dnode = 1;
dnode->up = cpu_to_le32(fno);
de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0);
de->creation_date = de->write_date = de->read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
if (!(mode & 0222)) de->read_only = 1;
de->first = de->directory = 1;
/*de->hidden = de->system = 0;*/
de->fnode = cpu_to_le32(fno);
mark_buffer_dirty(bh);
brelse(bh);
hpfs_mark_4buffers_dirty(&qbh0);
hpfs_brelse4(&qbh0);
inc_nlink(dir);
insert_inode_hash(result);
if (!uid_eq(result->i_uid, current_fsuid()) ||
!gid_eq(result->i_gid, current_fsgid()) ||
result->i_mode != (mode | S_IFDIR)) {
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
result->i_mode = mode | S_IFDIR;
hpfs_write_inode_nolock(result);
}
d_instantiate(dentry, result);
hpfs_unlock(dir->i_sb);
return 0;
bail3:
iput(result);
bail2:
hpfs_brelse4(&qbh0);
hpfs_free_dnode(dir->i_sb, dno);
bail1:
brelse(bh);
hpfs_free_sectors(dir->i_sb, fno, 1);
bail:
hpfs_unlock(dir->i_sb);
return err;
}
static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct inode *result = NULL;
struct buffer_head *bh;
struct fnode *fnode;
fnode_secno fno;
int r;
struct hpfs_dirent dee;
int err;
if ((err = hpfs_chk_name(name, &len)))
return err==-ENOENT ? -EINVAL : err;
hpfs_lock(dir->i_sb);
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
if (!fnode)
goto bail;
memset(&dee, 0, sizeof dee);
if (!(mode & 0222)) dee.read_only = 1;
dee.archive = 1;
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
result = new_inode(dir->i_sb);
if (!result)
goto bail1;
hpfs_init_inode(result);
result->i_ino = fno;
result->i_mode |= S_IFREG;
result->i_mode &= ~0111;
result->i_op = &hpfs_file_iops;
result->i_fop = &hpfs_file_ops;
set_nlink(result, 1);
hpfs_i(result)->i_parent_dir = dir->i_ino;
result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
result->i_ctime.tv_nsec = 0;
result->i_mtime.tv_nsec = 0;
result->i_atime.tv_nsec = 0;
hpfs_i(result)->i_ea_size = 0;
if (dee.read_only)
result->i_mode &= ~0222;
result->i_blocks = 1;
result->i_size = 0;
result->i_data.a_ops = &hpfs_aops;
hpfs_i(result)->mmu_private = 0;
r = hpfs_add_dirent(dir, name, len, &dee);
if (r == 1)
goto bail2;
if (r == -1) {
err = -EEXIST;
goto bail2;
}
fnode->len = len;
memcpy(fnode->name, name, len > 15 ? 15 : len);
fnode->up = cpu_to_le32(dir->i_ino);
mark_buffer_dirty(bh);
brelse(bh);
insert_inode_hash(result);
if (!uid_eq(result->i_uid, current_fsuid()) ||
!gid_eq(result->i_gid, current_fsgid()) ||
result->i_mode != (mode | S_IFREG)) {
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
result->i_mode = mode | S_IFREG;
hpfs_write_inode_nolock(result);
}
d_instantiate(dentry, result);
hpfs_unlock(dir->i_sb);
return 0;
bail2:
iput(result);
bail1:
brelse(bh);
hpfs_free_sectors(dir->i_sb, fno, 1);
bail:
hpfs_unlock(dir->i_sb);
return err;
}
static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct buffer_head *bh;
struct fnode *fnode;
fnode_secno fno;
int r;
struct hpfs_dirent dee;
struct inode *result = NULL;
int err;
if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM;
if (!new_valid_dev(rdev))
return -EINVAL;
hpfs_lock(dir->i_sb);
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
if (!fnode)
goto bail;
memset(&dee, 0, sizeof dee);
if (!(mode & 0222)) dee.read_only = 1;
dee.archive = 1;
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
result = new_inode(dir->i_sb);
if (!result)
goto bail1;
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
result->i_ctime.tv_nsec = 0;
result->i_mtime.tv_nsec = 0;
result->i_atime.tv_nsec = 0;
hpfs_i(result)->i_ea_size = 0;
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
set_nlink(result, 1);
result->i_size = 0;
result->i_blocks = 1;
init_special_inode(result, mode, rdev);
r = hpfs_add_dirent(dir, name, len, &dee);
if (r == 1)
goto bail2;
if (r == -1) {
err = -EEXIST;
goto bail2;
}
fnode->len = len;
memcpy(fnode->name, name, len > 15 ? 15 : len);
fnode->up = cpu_to_le32(dir->i_ino);
mark_buffer_dirty(bh);
insert_inode_hash(result);
hpfs_write_inode_nolock(result);
d_instantiate(dentry, result);
brelse(bh);
hpfs_unlock(dir->i_sb);
return 0;
bail2:
iput(result);
bail1:
brelse(bh);
hpfs_free_sectors(dir->i_sb, fno, 1);
bail:
hpfs_unlock(dir->i_sb);
return err;
}
static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct buffer_head *bh;
struct fnode *fnode;
fnode_secno fno;
int r;
struct hpfs_dirent dee;
struct inode *result;
int err;
if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
hpfs_lock(dir->i_sb);
if (hpfs_sb(dir->i_sb)->sb_eas < 2) {
hpfs_unlock(dir->i_sb);
return -EPERM;
}
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
if (!fnode)
goto bail;
memset(&dee, 0, sizeof dee);
dee.archive = 1;
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
result = new_inode(dir->i_sb);
if (!result)
goto bail1;
result->i_ino = fno;
hpfs_init_inode(result);
hpfs_i(result)->i_parent_dir = dir->i_ino;
result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
result->i_ctime.tv_nsec = 0;
result->i_mtime.tv_nsec = 0;
result->i_atime.tv_nsec = 0;
hpfs_i(result)->i_ea_size = 0;
result->i_mode = S_IFLNK | 0777;
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
result->i_blocks = 1;
set_nlink(result, 1);
result->i_size = strlen(symlink);
result->i_op = &page_symlink_inode_operations;
result->i_data.a_ops = &hpfs_symlink_aops;
r = hpfs_add_dirent(dir, name, len, &dee);
if (r == 1)
goto bail2;
if (r == -1) {
err = -EEXIST;
goto bail2;
}
fnode->len = len;
memcpy(fnode->name, name, len > 15 ? 15 : len);
fnode->up = cpu_to_le32(dir->i_ino);
hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink));
mark_buffer_dirty(bh);
brelse(bh);
insert_inode_hash(result);
hpfs_write_inode_nolock(result);
d_instantiate(dentry, result);
hpfs_unlock(dir->i_sb);
return 0;
bail2:
iput(result);
bail1:
brelse(bh);
hpfs_free_sectors(dir->i_sb, fno, 1);
bail:
hpfs_unlock(dir->i_sb);
return err;
}
static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
struct inode *inode = dentry->d_inode;
dnode_secno dno;
int r;
int rep = 0;
int err;
hpfs_lock(dir->i_sb);
hpfs_adjust_length(name, &len);
again:
err = -ENOENT;
de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
if (!de)
goto out;
err = -EPERM;
if (de->first)
goto out1;
err = -EISDIR;
if (de->directory)
goto out1;
r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
switch (r) {
case 1:
hpfs_error(dir->i_sb, "there was error when removing dirent");
err = -EFSERROR;
break;
case 2: /* no space for deleting, try to truncate file */
err = -ENOSPC;
if (rep++)
break;
dentry_unhash(dentry);
if (!d_unhashed(dentry)) {
hpfs_unlock(dir->i_sb);
return -ENOSPC;
}
if (generic_permission(inode, MAY_WRITE) ||
!S_ISREG(inode->i_mode) ||
get_write_access(inode)) {
d_rehash(dentry);
} else {
struct iattr newattrs;
/*printk("HPFS: truncating file before delete.\n");*/
newattrs.ia_size = 0;
newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
err = notify_change(dentry, &newattrs);
put_write_access(inode);
if (!err)
goto again;
}
hpfs_unlock(dir->i_sb);
return -ENOSPC;
default:
drop_nlink(inode);
err = 0;
}
goto out;
out1:
hpfs_brelse4(&qbh);
out:
hpfs_unlock(dir->i_sb);
return err;
}
static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
struct quad_buffer_head qbh;
struct hpfs_dirent *de;
struct inode *inode = dentry->d_inode;
dnode_secno dno;
int n_items = 0;
int err;
int r;
hpfs_adjust_length(name, &len);
hpfs_lock(dir->i_sb);
err = -ENOENT;
de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
if (!de)
goto out;
err = -EPERM;
if (de->first)
goto out1;
err = -ENOTDIR;
if (!de->directory)
goto out1;
hpfs_count_dnodes(dir->i_sb, hpfs_i(inode)->i_dno, NULL, NULL, &n_items);
err = -ENOTEMPTY;
if (n_items)
goto out1;
r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
switch (r) {
case 1:
hpfs_error(dir->i_sb, "there was error when removing dirent");
err = -EFSERROR;
break;
case 2:
err = -ENOSPC;
break;
default:
drop_nlink(dir);
clear_nlink(inode);
err = 0;
}
goto out;
out1:
hpfs_brelse4(&qbh);
out:
hpfs_unlock(dir->i_sb);
return err;
}
static int hpfs_symlink_readpage(struct file *file, struct page *page)
{
char *link = kmap(page);
struct inode *i = page->mapping->host;
struct fnode *fnode;
struct buffer_head *bh;
int err;
err = -EIO;
hpfs_lock(i->i_sb);
if (!(fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh)))
goto fail;
err = hpfs_read_ea(i->i_sb, fnode, "SYMLINK", link, PAGE_SIZE);
brelse(bh);
if (err)
goto fail;
hpfs_unlock(i->i_sb);
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
fail:
hpfs_unlock(i->i_sb);
SetPageError(page);
kunmap(page);
unlock_page(page);
return err;
}
const struct address_space_operations hpfs_symlink_aops = {
.readpage = hpfs_symlink_readpage
};
static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
const unsigned char *old_name = old_dentry->d_name.name;
unsigned old_len = old_dentry->d_name.len;
const unsigned char *new_name = new_dentry->d_name.name;
unsigned new_len = new_dentry->d_name.len;
struct inode *i = old_dentry->d_inode;
struct inode *new_inode = new_dentry->d_inode;
struct quad_buffer_head qbh, qbh1;
struct hpfs_dirent *dep, *nde;
struct hpfs_dirent de;
dnode_secno dno;
int r;
struct buffer_head *bh;
struct fnode *fnode;
int err;
if ((err = hpfs_chk_name(new_name, &new_len))) return err;
err = 0;
hpfs_adjust_length(old_name, &old_len);
hpfs_lock(i->i_sb);
/* order doesn't matter, due to VFS exclusion */
/* Erm? Moving over the empty non-busy directory is perfectly legal */
if (new_inode && S_ISDIR(new_inode->i_mode)) {
err = -EINVAL;
goto end1;
}
if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
hpfs_error(i->i_sb, "lookup succeeded but map dirent failed");
err = -ENOENT;
goto end1;
}
copy_de(&de, dep);
de.hidden = new_name[0] == '.';
if (new_inode) {
int r;
if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, new_name, new_len, NULL, &qbh1))) {
clear_nlink(new_inode);
copy_de(nde, &de);
memcpy(nde->name, new_name, new_len);
hpfs_mark_4buffers_dirty(&qbh1);
hpfs_brelse4(&qbh1);
goto end;
}
hpfs_error(new_dir->i_sb, "hpfs_rename: could not find dirent");
err = -EFSERROR;
goto end1;
}
err = r == 2 ? -ENOSPC : r == 1 ? -EFSERROR : 0;
goto end1;
}
if (new_dir == old_dir) hpfs_brelse4(&qbh);
if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de))) {
if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!");
err = r == 1 ? -ENOSPC : -EFSERROR;
if (new_dir != old_dir) hpfs_brelse4(&qbh);
goto end1;
}
if (new_dir == old_dir)
if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
err = -ENOENT;
goto end1;
}
if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) {
hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent");
err = r == 2 ? -ENOSPC : -EFSERROR;
goto end1;
}
end:
hpfs_i(i)->i_parent_dir = new_dir->i_ino;
if (S_ISDIR(i->i_mode)) {
inc_nlink(new_dir);
drop_nlink(old_dir);
}
if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) {
fnode->up = cpu_to_le32(new_dir->i_ino);
fnode->len = new_len;
memcpy(fnode->name, new_name, new_len>15?15:new_len);
if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len);
mark_buffer_dirty(bh);
brelse(bh);
}
end1:
hpfs_unlock(i->i_sb);
return err;
}
/* Inode operations for HPFS directories (hpfs_rename is defined above). */
const struct inode_operations hpfs_dir_iops =
{
	.create		= hpfs_create,
	.lookup		= hpfs_lookup,
	.unlink		= hpfs_unlink,
	.symlink	= hpfs_symlink,
	.mkdir		= hpfs_mkdir,
	.rmdir		= hpfs_rmdir,
	.mknod		= hpfs_mknod,
	.rename		= hpfs_rename,
	.setattr	= hpfs_setattr,
};
| gpl-2.0 |
MoKee/android_kernel_lge_star | drivers/net/de620.c | 3084 | 26642 | /*
* de620.c $Revision: 1.40 $ BETA
*
*
* Linux driver for the D-Link DE-620 Ethernet pocket adapter.
*
* Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
*
* Based on adapter information gathered from DOS packetdriver
* sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
* Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
* Copyright, 1988, Russell Nelson, Crynwr Software
*
* Adapted to the sample network driver core for linux,
* written by: Donald Becker <becker@super.org>
* (Now at <becker@scyld.com>)
*
* Valuable assistance from:
* J. Joshua Kopper <kopper@rtsg.mot.com>
* Olav Kvittem <Olav.Kvittem@uninett.no>
* Germano Caronni <caronni@nessie.cs.id.ethz.ch>
* Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
*
*****************************************************************************/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/
static const char version[] =
"de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n";
/***********************************************************************
*
* "Tuning" section.
*
* Compile-time options: (see below for descriptions)
* -DDE620_IO=0x378 (lpt1)
* -DDE620_IRQ=7 (lpt1)
* -DSHUTDOWN_WHEN_LOST
* -DCOUNT_LOOPS
* -DLOWSPEED
* -DREAD_DELAY
* -DWRITE_DELAY
*/
/*
* This driver assumes that the printer port is a "normal",
* dumb, uni-directional port!
* If your port is "fancy" in any way, please try to set it to "normal"
* with your BIOS setup. I have no access to machines with bi-directional
* ports, so I can't test such a driver :-(
* (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
*
* There are some clones of DE620 out there, with different names.
* If the current driver does not recognize a clone, try to change
* the following #define to:
*
* #define DE620_CLONE 1
*/
#define DE620_CLONE 0
/*
* If the adapter has problems with high speeds, enable this #define
* otherwise full printerport speed will be attempted.
*
* You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
*
#define LOWSPEED
*/
#ifndef READ_DELAY
#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
#endif
#ifndef WRITE_DELAY
#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
#endif
/*
* Enable this #define if you want the adapter to do a "ifconfig down" on
* itself when we have detected that something is possibly wrong with it.
* The default behaviour is to retry with "adapter_init()" until success.
* This should be used for debugging purposes only.
*
#define SHUTDOWN_WHEN_LOST
*/
#ifdef LOWSPEED
/*
* Enable this #define if you want to see debugging output that show how long
* we have to wait before the DE-620 is ready for the next read/write/command.
*
#define COUNT_LOOPS
*/
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/io.h>
#include <asm/system.h>
/* Constant definitions for the DE-620 registers, commands and bits */
#include "de620.h"
typedef unsigned char byte;
/*******************************************************
* *
* Definition of D-Link DE-620 Ethernet Pocket adapter *
* See also "de620.h" *
* *
*******************************************************/
#ifndef DE620_IO /* Compile-time configurable */
#define DE620_IO 0x378
#endif
#ifndef DE620_IRQ /* Compile-time configurable */
#define DE620_IRQ 7
#endif
#define DATA_PORT (dev->base_addr)
#define STATUS_PORT (dev->base_addr + 1)
#define COMMAND_PORT (dev->base_addr + 2)
#define RUNT 60 /* Too small Ethernet packet */
#define GIANT 1514 /* largest legal size packet, no fcs */
/*
* Force media with insmod:
* insmod de620.o bnc=1
* or
* insmod de620.o utp=1
*
* Force io and/or irq with insmod:
* insmod de620.o io=0x378 irq=7
*
* Make a clone skip the Ethernet-address range check:
* insmod de620.o clone=1
*/
static int bnc;
static int utp;
static int io = DE620_IO;
static int irq = DE620_IRQ;
static int clone = DE620_CLONE;
static spinlock_t de620_lock;
module_param(bnc, int, 0);
module_param(utp, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(clone, int, 0);
MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
MODULE_PARM_DESC(io, "DE-620 I/O base address,required");
MODULE_PARM_DESC(irq, "DE-620 IRQ number,required");
MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
/***********************************************
* *
* Index to functions, as function prototypes. *
* *
***********************************************/
/*
* Routines used internally. (See also "convenience macros.. below")
*/
/* Put in the device structure. */
static int de620_open(struct net_device *);
static int de620_close(struct net_device *);
static void de620_set_multicast_list(struct net_device *);
static int de620_start_xmit(struct sk_buff *, struct net_device *);
/* Dispatch from interrupts. */
static irqreturn_t de620_interrupt(int, void *);
static int de620_rx_intr(struct net_device *);
/* Initialization */
static int adapter_init(struct net_device *);
static int read_eeprom(struct net_device *);
/*
* D-Link driver variables:
*/
#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
#define DEF_NIC_CMD IRQEN | ICEN | DS1
static volatile byte NIC_Cmd;
static volatile byte next_rx_page;
static byte first_rx_page;
static byte last_rx_page;
static byte EIPRegister;
/* Image of the adapter's EEPROM identification data (filled by read_eeprom()) */
static struct nic {
	byte NodeID[6];		/* Ethernet station address */
	byte RAM_Size;		/* RAM size in 256-byte pages; 0 means 64k */
	byte Model;		/* hardware model (CT = 3) */
	byte Media;		/* 1 = BNC, otherwise UTP */
	byte SCR;		/* System Configuration Register contents */
} nic_data;
/**********************************************************
* *
* Convenience macros/functions for D-Link DE-620 adapter *
* *
**********************************************************/
#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
/* Check for ready-status, and return a nibble (high 4 bits) for data input */
#ifdef COUNT_LOOPS
static int tot_cnt;
#endif
/*
 * Busy-wait (bounded to ~1000 spins) until the adapter raises READY on
 * the status port, then return the high nibble of the last status byte
 * read — that nibble carries the data during nibble-mode input.
 */
static inline byte
de620_ready(struct net_device *dev)
{
	byte status;
	register short int spins = 0;

	for (;;) {
		status = inb(STATUS_PORT);
		if ((status & READY) != 0 || spins > 1000)
			break;
		++spins;
	}

#ifdef COUNT_LOOPS
	tot_cnt += spins;
#endif
	return status & 0xf0;	/* upper nibble */
}
/*
 * Issue a single command byte to the adapter.
 * The command is strobed in by toggling CS0 on the command port; the
 * exact write ordering below is part of the hardware protocol.
 */
static inline void
de620_send_command(struct net_device *dev, byte cmd)
{
	de620_ready(dev);
	/* W_DUMMY needs the control port restored to the base state first */
	if (cmd == W_DUMMY)
		outb(NIC_Cmd, COMMAND_PORT);

	outb(cmd, DATA_PORT);		/* present the command byte */

	outb(NIC_Cmd ^ CS0, COMMAND_PORT);	/* strobe CS0... */
	de620_ready(dev);
	outb(NIC_Cmd, COMMAND_PORT);		/* ...and release it */
}
/*
 * Feed one data byte to the adapter over the printer port, latching it
 * with a DS0/DS1 flip on the command port.
 */
static inline void
de620_put_byte(struct net_device *dev, byte value)
{
	/* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
	de620_ready(dev);
	outb(value, DATA_PORT);
	de620_flip_ds(dev);
}
/*
 * Fetch one data byte from the adapter as two 4-bit nibbles read off
 * the status port (high nibble first, then flip DS and read the low).
 */
static inline byte
de620_read_byte(struct net_device *dev)
{
	byte value;

	/* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
	value = de620_ready(dev);	/* High nibble */
	de620_flip_ds(dev);
	value |= de620_ready(dev) >> 4;	/* Low nibble */
	return value;
}
/*
 * Copy 'count' bytes from 'buffer' to the adapter, then terminate the
 * transfer with a W_DUMMY command.
 *
 * Two compile-time variants exist:
 *  - LOWSPEED: byte-at-a-time via de620_put_byte(), followed by 'pad'
 *    zero bytes (used to pad runt frames up to the minimum length).
 *  - fast path: two bytes per loop iteration, latching each with the
 *    uflip/dflip command-port writes.
 *
 * NOTE(review): the fast path never emits the 'pad' zero bytes, and an
 * odd 'count' makes it read one byte past 'buffer' (callers pass an
 * even padded length) — confirm this matches the adapter's behavior.
 */
static inline void
de620_write_block(struct net_device *dev, byte *buffer, int count, int pad)
{
#ifndef LOWSPEED
	byte uflip = NIC_Cmd ^ (DS0 | DS1);
	byte dflip = NIC_Cmd;
#else /* LOWSPEED */
#ifdef COUNT_LOOPS
	int bytes = count;
#endif /* COUNT_LOOPS */
#endif /* LOWSPEED */

#ifdef LOWSPEED
#ifdef COUNT_LOOPS
	tot_cnt = 0;
#endif /* COUNT_LOOPS */
	/* No further optimization useful, the limit is in the adapter. */
	for ( ; count > 0; --count, ++buffer) {
		de620_put_byte(dev,*buffer);
	}
	for ( count = pad ; count > 0; --count, ++buffer) {
		de620_put_byte(dev, 0);
	}
	de620_send_command(dev,W_DUMMY);
#ifdef COUNT_LOOPS
	/* trial debug output: loops per byte in de620_ready() */
	printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
#endif /* COUNT_LOOPS */
#else /* not LOWSPEED */
	for ( ; count > 0; count -=2) {
		outb(*buffer++, DATA_PORT);
		outb(uflip, COMMAND_PORT);
		outb(*buffer++, DATA_PORT);
		outb(dflip, COMMAND_PORT);
	}
	de620_send_command(dev,W_DUMMY);
#endif /* LOWSPEED */
}
/*
 * Copy 'count' bytes from the adapter into 'data'.
 *
 * LOWSPEED variant goes through de620_read_byte(); the fast path reads
 * the two nibbles of each byte directly off the status port, latching
 * with the uflip/dflip command-port writes.
 */
static inline void
de620_read_block(struct net_device *dev, byte *data, int count)
{
#ifndef LOWSPEED
	byte value;
	byte uflip = NIC_Cmd ^ (DS0 | DS1);
	byte dflip = NIC_Cmd;
#else /* LOWSPEED */
#ifdef COUNT_LOOPS
	int bytes = count;

	tot_cnt = 0;
#endif /* COUNT_LOOPS */
#endif /* LOWSPEED */

#ifdef LOWSPEED
	/* No further optimization useful, the limit is in the adapter. */
	while (count-- > 0) {
		*data++ = de620_read_byte(dev);
		de620_flip_ds(dev);
	}
#ifdef COUNT_LOOPS
	/* trial debug output: loops per byte in de620_ready() */
	printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
#endif /* COUNT_LOOPS */
#else /* not LOWSPEED */
	while (count-- > 0) {
		value = inb(STATUS_PORT) & 0xf0; /* High nibble */
		outb(uflip, COMMAND_PORT);
		*data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
		outb(dflip , COMMAND_PORT);
	}
#endif /* LOWSPEED */
}
/*
 * Program the adapter's internal write and read delay registers
 * (W_DFR).  With LOWSPEED the compile-time WRITE_DELAY/READ_DELAY
 * values (100ns units) are used; otherwise both delays are zero.
 */
static inline void
de620_set_delay(struct net_device *dev)
{
	de620_ready(dev);
	outb(W_DFR, DATA_PORT);
	outb(NIC_Cmd ^ CS0, COMMAND_PORT);

	de620_ready(dev);
#ifdef LOWSPEED
	outb(WRITE_DELAY, DATA_PORT);
#else
	outb(0, DATA_PORT);
#endif
	de620_flip_ds(dev);

	de620_ready(dev);
#ifdef LOWSPEED
	outb(READ_DELAY, DATA_PORT);
#else
	outb(0, DATA_PORT);
#endif
	de620_flip_ds(dev);
}
/* Write 'value' into adapter register 'reg' (a W_* register selector). */
static inline void
de620_set_register(struct net_device *dev, byte reg, byte value)
{
	de620_ready(dev);
	outb(reg, DATA_PORT);		/* select the register... */
	outb(NIC_Cmd ^ CS0, COMMAND_PORT);

	de620_put_byte(dev, value);	/* ...then write its new contents */
}
/* Read and return the contents of adapter register 'reg' (an R_* selector). */
static inline byte
de620_get_register(struct net_device *dev, byte reg)
{
	byte value;

	de620_send_command(dev,reg);
	value = de620_read_byte(dev);
	de620_send_command(dev,W_DUMMY);	/* terminate the access */

	return value;
}
/*********************************************************************
*
* Open/initialize the board.
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
* there is a non-reboot way to recover if something goes wrong.
*
*/
/*
 * Bring the interface up: grab the IRQ, (re)initialize the adapter
 * and start the transmit queue.  Returns 0 or a negative errno.
 */
static int de620_open(struct net_device *dev)
{
	int err;

	err = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
	if (err) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
		return err;
	}

	if (adapter_init(dev)) {
		/* Hardware did not come up sane; undo the IRQ grab */
		free_irq(dev->irq, dev);
		return -EIO;
	}

	netif_start_queue(dev);
	return 0;
}
/************************************************
*
* The inverse routine to de620_open().
*
*/
/*
 * Bring the interface down: stop the queue, turn off reception and
 * release the IRQ.  Always returns 0.
 */
static int de620_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	/* disable recv */
	de620_set_register(dev, W_TCR, RXOFF);
	free_irq(dev->irq, dev);
	return 0;
}
/*********************************************
*
* Set or clear the multicast filter for this adaptor.
* (no real multicast implemented for the DE-620, but she can be promiscuous...)
*
*/
static void de620_set_multicast_list(struct net_device *dev)
{
if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{ /* Enable promiscuous mode */
de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
}
else
{ /* Disable promiscuous mode, use normal mode */
de620_set_register(dev, W_TCR, TCR_DEF);
}
}
/*******************************************************
*
* Handle timeouts on transmit
*/
/*
 * Transmit watchdog: log the timeout and try to reset the adapter,
 * restarting the queue only if the reset succeeds.
 */
static void de620_timeout(struct net_device *dev)
{
	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem");
	/* Restart the adapter. */
	if (adapter_init(dev) == 0)
		netif_wake_queue(dev);
}
/*******************************************************
*
* Copy a buffer to the adapter transmit page memory.
* Start sending.
*/
/*
 * Queue one packet for transmission.
 *
 * The frame length is padded up to RUNT (60) and to an even number of
 * bytes (de620_write_block's fast path moves two bytes per loop), then
 * the data is copied into one of the adapter's two on-board transmit
 * buffers.  Returns NETDEV_TX_BUSY if both buffers are occupied.
 */
static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	int len;
	byte *buffer = skb->data;
	byte using_txbuf;

	using_txbuf = de620_tx_buffs(dev);	/* Peek at the adapter */

	netif_stop_queue(dev);

	if ((len = skb->len) < RUNT)
		len = RUNT;
	if (len & 1)	/* send an even number of bytes */
		++len;

	/* Start real output */
	spin_lock_irqsave(&de620_lock, flags);
	pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n",
		(int)skb->len, using_txbuf);

	/* select a free tx buffer. if there is one... */
	switch (using_txbuf) {
	default:		/* both are free: use TXBF0 */
	case TXBF1:		/* use TXBF0 */
		de620_send_command(dev,W_CR | RW0);
		using_txbuf |= TXBF0;
		break;

	case TXBF0:		/* use TXBF1 */
		de620_send_command(dev,W_CR | RW1);
		using_txbuf |= TXBF1;
		break;

	case (TXBF0 | TXBF1):	/* NONE!!! */
		printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
		spin_unlock_irqrestore(&de620_lock, flags);
		return NETDEV_TX_BUSY;
	}
	de620_write_block(dev, buffer, skb->len, len-skb->len);

	/* Keep the queue running while the other tx buffer is still free */
	if(!(using_txbuf == (TXBF0 | TXBF1)))
		netif_wake_queue(dev);

	/* NOTE(review): only tx_packets is counted here; tx_bytes is never
	 * updated anywhere in this driver -- confirm that is intended. */
	dev->stats.tx_packets++;

	spin_unlock_irqrestore(&de620_lock, flags);

	dev_kfree_skb (skb);

	return NETDEV_TX_OK;
}
/*****************************************************
*
* Handle the network interface interrupts.
*
*/
/*
 * Interrupt handler: drain received frames (bounded to 100 rounds to
 * avoid livelock) and restart the transmit queue if a tx buffer has
 * become free.  Always reports IRQ_HANDLED.
 */
static irqreturn_t
de620_interrupt(int irq_in, void *dev_id)
{
	struct net_device *dev = dev_id;
	byte irq_status;
	int bogus_count = 0;
	int again = 0;

	spin_lock(&de620_lock);

	/* Read the status register (_not_ the status port) */
	irq_status = de620_get_register(dev, R_STS);
	pr_debug("de620_interrupt (%2.2X)\n", irq_status);

	if (irq_status & RXGOOD) {
		/* de620_rx_intr() returns non-zero while more data waits */
		do {
			again = de620_rx_intr(dev);
			pr_debug("again=%d\n", again);
		}
		while (again && (++bogus_count < 100));
	}

	/* Wake transmission if at least one tx buffer is now free */
	if(de620_tx_buffs(dev) != (TXBF0 | TXBF1))
		netif_wake_queue(dev);

	spin_unlock(&de620_lock);
	return IRQ_HANDLED;
}
/**************************************
*
* Get a packet from the adapter
*
* Send it "upstairs"
*
*/
/*
 * Pull one received frame out of the adapter's ring of 256-byte rx
 * pages and deliver it to the network stack.
 *
 * Each buffered frame starts with a 4-byte page header (status, link
 * to the next page, 16-bit byte count).  The link is cross-checked
 * against a value computed from the byte count; on any inconsistency
 * the ring is resynchronized (or the adapter fully reinitialized) and
 * the frame dropped as an rx_over error.
 *
 * Returns non-zero if the adapter's current-page register shows more
 * data is waiting (the interrupt handler loops on this), 0 otherwise.
 */
static int de620_rx_intr(struct net_device *dev)
{
	struct header_buf {
		byte status;
		byte Rx_NextPage;
		unsigned short Rx_ByteCount;
	} header_buf;
	struct sk_buff *skb;
	int size;
	byte *buffer;
	byte pagelink;
	byte curr_page;

	pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page);

	/* Tell the adapter that we are going to read data, and from where */
	de620_send_command(dev, W_CR | RRN);
	de620_set_register(dev, W_RSA1, next_rx_page);
	de620_set_register(dev, W_RSA0, 0);

	/* Deep breath, and away we goooooo */
	de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
	pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n",
		header_buf.status, header_buf.Rx_NextPage,
		header_buf.Rx_ByteCount);

	/* Plausible page header? */
	pagelink = header_buf.Rx_NextPage;
	if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
		/* Ouch... Forget it! Skip all and start afresh... */
		printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name);
		/* You win some, you lose some. And sometimes plenty... */
		adapter_init(dev);
		netif_wake_queue(dev);
		dev->stats.rx_over_errors++;
		return 0;
	}

	/* OK, this look good, so far. Let's see if it's consistent... */
	/* Let's compute the start of the next packet, based on where we are */
	pagelink = next_rx_page +
	((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);

	/* Are we going to wrap around the page counter? */
	if (pagelink > last_rx_page)
		pagelink -= (last_rx_page - first_rx_page + 1);

	/* Is the _computed_ next page number equal to what the adapter says? */
	if (pagelink != header_buf.Rx_NextPage) {
		/* Naah, we'll skip this packet. Probably bogus data as well */
		printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name);
		next_rx_page = header_buf.Rx_NextPage;	/* at least a try... */
		de620_send_command(dev, W_DUMMY);
		de620_set_register(dev, W_NPRF, next_rx_page);
		dev->stats.rx_over_errors++;
		return 0;
	}
	next_rx_page = pagelink;

	size = header_buf.Rx_ByteCount - 4;	/* strip the page header */
	if ((size < RUNT) || (GIANT < size)) {
		printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
	}
	else { /* Good packet? */
		skb = dev_alloc_skb(size+2);
		if (skb == NULL) { /* Yeah, but no place to put it... */
			printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
			dev->stats.rx_dropped++;
		}
		else { /* Yep! Go get it! */
			skb_reserve(skb,2);	/* Align */
			/* skb->data points to the start of sk_buff data area */
			buffer = skb_put(skb,size);
			/* copy the packet into the buffer */
			de620_read_block(dev, buffer, size);
			pr_debug("Read %d bytes\n", size);
			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb);	/* deliver it "upstairs" */
			/* count all receives */
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += size;
		}
	}

	/* Let's peek ahead to see if we have read the last current packet */
	/* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
	curr_page = de620_get_register(dev, R_CPR);
	de620_set_register(dev, W_NPRF, next_rx_page);
	pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);

	return next_rx_page != curr_page; /* That was slightly tricky... */
}
/*********************************************
*
* Reset the adapter to a known state
*
*/
/*
 * (Re)initialize the adapter to a known state: select the media/EIP
 * setting from the EEPROM data (overridable with the bnc=/utp= module
 * parameters), program the station address, lay out the rx page ring,
 * set the port delays and finally sanity-check the status register.
 *
 * Returns 0 on success, 1 if the adapter does not respond sanely.
 */
static int adapter_init(struct net_device *dev)
{
	int i;
	static int was_down;

	if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
		EIPRegister = NCTL0;
		if (nic_data.Media != 1)
			EIPRegister |= NIS0;	/* not BNC */
	}
	else if (nic_data.Model == 2) { /* UTP */
		EIPRegister = NCTL0 | NIS0;
	}
	/* Module parameters override the EEPROM's media setting */
	if (utp)
		EIPRegister = NCTL0 | NIS0;
	if (bnc)
		EIPRegister = NCTL0;

	de620_send_command(dev, W_CR | RNOP | CLEAR);
	de620_send_command(dev, W_CR | RNOP);

	de620_set_register(dev, W_SCR, SCR_DEF);
	/* disable recv to wait init */
	de620_set_register(dev, W_TCR, RXOFF);

	/* Set the node ID in the adapter */
	for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
		de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
	}

	de620_set_register(dev, W_EIP, EIPRegister);

	/* Rx ring occupies everything above the tx area */
	next_rx_page = first_rx_page = DE620_RX_START_PAGE;
	if (nic_data.RAM_Size)
		last_rx_page = nic_data.RAM_Size - 1;
	else /* 64k RAM */
		last_rx_page = 255;

	de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
	de620_set_register(dev, W_EPR, last_rx_page);	/* End Page Register */
	de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
	de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
	de620_send_command(dev, W_DUMMY);
	de620_set_delay(dev);

	/* Final sanity check: Anybody out there? */
	/* Let's hope some bits from the statusregister make a good check */
#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
	/* success: X 0 0 X 0 0 X X */
	/* ignore: EEDI RXGOOD COLS LNKS*/

	if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
		printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it"
#ifdef SHUTDOWN_WHEN_LOST
			" and do a new ifconfig"
#endif
			"! (%02x)\n", dev->name, i);
#ifdef SHUTDOWN_WHEN_LOST
		/* Goodbye, cruel world... */
		dev->flags &= ~IFF_UP;
		de620_close(dev);
#endif
		was_down = 1;
		return 1;	/* failed */
	}
	if (was_down) {
		printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name);
		was_down = 0;
	}

	/* All OK, go ahead... */
	de620_set_register(dev, W_TCR, TCR_DEF);

	return 0; /* all ok */
}
/* net_device callbacks for the DE-620; generic eth_* helpers fill the rest. */
static const struct net_device_ops de620_netdev_ops = {
	.ndo_open 		= de620_open,
	.ndo_stop 		= de620_close,
	.ndo_start_xmit 	= de620_start_xmit,
	.ndo_tx_timeout 	= de620_timeout,
	.ndo_set_multicast_list = de620_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/******************************************************************************
*
* Only start-up code below
*
*/
/****************************************
*
* Check if there is a DE-620 connected
*/
/*
 * Probe for a DE-620 on the configured printer port: reserve the I/O
 * region, verify the adapter answers a register write-back test, read
 * its EEPROM and register the resulting Ethernet device.
 *
 * Returns the net_device on success or an ERR_PTR() on failure.
 */
struct net_device * __init de620_probe(int unit)
{
	byte checkbyte = 0xa5;
	struct net_device *dev;
	int err = -ENOMEM;
	int i;

	dev = alloc_etherdev(0);
	if (!dev)
		goto out;

	spin_lock_init(&de620_lock);

	/*
	 * This is where the base_addr and irq gets set.
	 * Tunable at compile-time and insmod-time
	 */
	dev->base_addr = io;
	dev->irq = irq;

	/* allow overriding parameters on command line */
	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	pr_debug("%s", version);

	printk(KERN_INFO "D-Link DE-620 pocket adapter");

	if (!request_region(dev->base_addr, 3, "de620")) {
		printk(" io 0x%3lX, which is busy.\n", dev->base_addr);
		err = -EBUSY;
		goto out1;
	}

	/* Initially, configure basic nibble mode, so we can read the EEPROM */
	NIC_Cmd = DEF_NIC_CMD;
	de620_set_register(dev, W_EIP, EIPRegister);

	/* Anybody out there? Write a byte and see if it comes back */
	de620_set_register(dev, W_CPR, checkbyte);
	checkbyte = de620_get_register(dev, R_CPR);

	if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
		printk(" not identified in the printer port\n");
		err = -ENODEV;
		goto out2;
	}

	/* else, got it! */
	dev->dev_addr[0] = nic_data.NodeID[0];
	for (i = 1; i < ETH_ALEN; i++) {
		dev->dev_addr[i] = nic_data.NodeID[i];
		dev->broadcast[i] = 0xff;
	}

	printk(", Ethernet Address: %pM", dev->dev_addr);

	printk(" (%dk RAM,",
		(nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);

	if (nic_data.Media == 1)
		printk(" BNC)\n");
	else
		printk(" UTP)\n");

	dev->netdev_ops = &de620_netdev_ops;
	dev->watchdog_timeo	= HZ*2;

	/* base_addr and irq are already set, see above! */

	/* dump eeprom */
	pr_debug("\nEEPROM contents:\n"
		"RAM_Size = 0x%02X\n"
		"NodeID = %pM\n"
		"Model = %d\n"
		"Media = %d\n"
		"SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID,
		nic_data.Model, nic_data.Media, nic_data.SCR);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;

out2:
	release_region(dev->base_addr, 3);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
/**********************************
*
* Read info from on-board EEPROM
*
* Note: Bitwise serial I/O to/from the EEPROM vi the status _register_!
*/
#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
/*
 * Clock one 16-bit word out of the on-board serial EEPROM.
 *
 * 'from' is the 9-bit word address.  The chip-select, data-in and
 * clock lines are bit-banged through the EIP register (see sendit());
 * result bits are read back one at a time via the EEDI status bit.
 */
static unsigned short __init ReadAWord(struct net_device *dev, int from)
{
	unsigned short data;
	int nbits;

	/* cs [__~~] SET SEND STATE */
	/* di [____] */
	/* sck [_~~_] */
	sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);

	/* Send the 9-bit address from where we want to read the 16-bit word */
	for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
		if (from & 0x0100) { /* bit set? */
			/* cs [~~~~] SEND 1 */
			/* di [~~~~] */
			/* sck [_~~_] */
			sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
		}
		else {
			/* cs [~~~~] SEND 0 */
			/* di [____] */
			/* sck [_~~_] */
			sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
		}
	}

	/* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
	for (data = 0, nbits = 16; nbits > 0; --nbits) {
		/* cs [~~~~] SEND 0 */
		/* di [____] */
		/* sck [_~~_] */
		sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
		data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
	}

	/* cs [____] RESET SEND STATE */
	/* di [____] */
	/* sck [_~~_] */
	sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);

	return data;
}
/*
 * Read the adapter's identification data out of the EEPROM into
 * nic_data.  Returns 0 on success, or -1 if the Ethernet address does
 * not carry the D-Link 00:80:c8 prefix (skipped when 'clone' is set).
 */
static int __init read_eeprom(struct net_device *dev)
{
	unsigned short wrd;

	/* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */
	wrd = ReadAWord(dev, 0x1aa);	/* bytes 0 + 1 of NodeID */
	/* NOTE(review): htons() is used to express the 00:80 byte order of
	 * the EEPROM word on little-endian hosts -- verify on big-endian. */
	if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
		return -1; /* Nope, not a DE-620 */
	nic_data.NodeID[0] = wrd & 0xff;
	nic_data.NodeID[1] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ab);	/* bytes 2 + 3 of NodeID */
	if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
		return -1; /* Nope, not a DE-620 */
	nic_data.NodeID[2] = wrd & 0xff;
	nic_data.NodeID[3] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ac);	/* bytes 4 + 5 of NodeID */
	nic_data.NodeID[4] = wrd & 0xff;
	nic_data.NodeID[5] = wrd >> 8;

	wrd = ReadAWord(dev, 0x1ad);	/* RAM size in pages (256 bytes). 0 = 64k */
	nic_data.RAM_Size = (wrd >> 8);

	wrd = ReadAWord(dev, 0x1ae);	/* hardware model (CT = 3) */
	nic_data.Model = (wrd & 0xff);

	wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
	nic_data.Media = (wrd & 0xff);

	wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
	nic_data.SCR = (wrd >> 8);

	return 0; /* no errors */
}
/******************************************************************************
*
* Loadable module skeleton
*
*/
#ifdef MODULE
static struct net_device *de620_dev;
/* Module entry point: autoprobe for a single DE-620 adapter. */
int __init init_module(void)
{
	de620_dev = de620_probe(-1);
	return IS_ERR(de620_dev) ? PTR_ERR(de620_dev) : 0;
}
/* Module exit: unregister the device and release its I/O region. */
void cleanup_module(void)
{
	unregister_netdev(de620_dev);
	release_region(de620_dev->base_addr, 3);
	free_netdev(de620_dev);
}
#endif /* MODULE */
MODULE_LICENSE("GPL");
| gpl-2.0 |
frank-liu/multichannel | fs/autofs4/init.c | 3340 | 1239 | /* -*- c -*- --------------------------------------------------------------- *
*
* linux/fs/autofs/init.c
*
* Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* ------------------------------------------------------------------------- */
#include <linux/module.h>
#include <linux/init.h>
#include "autofs_i.h"
/* Mount callback: autofs has no backing device, so use the nodev helper. */
static struct dentry *autofs_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	struct dentry *root;

	root = mount_nodev(fs_type, flags, data, autofs4_fill_super);
	return root;
}
/* Filesystem type registered as "autofs" (implementation is autofs v4). */
static struct file_system_type autofs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "autofs",
	.mount		= autofs_mount,
	.kill_sb	= autofs4_kill_sb,
};
/*
 * Module init: register the autofs filesystem type, then bring up the
 * misc-device ioctl interface.  If the ioctl setup fails, undo the
 * filesystem registration so the module does not load half-initialized.
 * Returns 0 on success or a negative errno.
 */
static int __init init_autofs4_fs(void)
{
	int err;

	err = register_filesystem(&autofs_fs_type);
	if (err)
		return err;

	/* Previously this return value was ignored, leaving the filesystem
	 * registered even when the ioctl interface failed to come up. */
	err = autofs_dev_ioctl_init();
	if (err)
		unregister_filesystem(&autofs_fs_type);

	return err;
}
/* Module exit: tear down the ioctl interface, then unregister the fs type. */
static void __exit exit_autofs4_fs(void)
{
	autofs_dev_ioctl_exit();
	unregister_filesystem(&autofs_fs_type);
}
module_init(init_autofs4_fs)
module_exit(exit_autofs4_fs)
MODULE_LICENSE("GPL");
| gpl-2.0 |
edoko/AirKernel_GN_JB | drivers/scsi/fd_mcs.c | 3340 | 39704 | /* fd_mcs.c -- Future Domain MCS 600/700 (or IBM OEM) driver
*
* FutureDomain MCS-600/700 v0.2 03/11/1998 by ZP Gu (zpg@castle.net)
*
* This driver is cloned from fdomain.* to specifically support
* the Future Domain MCS 600/700 MCA SCSI adapters. Some PS/2s
* also equipped with IBM Fast SCSI Adapter/A which is an OEM
* of MCS 700.
*
* This driver also supports Reply SB16/SCSI card (the SCSI part).
*
* What makes this driver different is that this driver is MCA only
* and it supports multiple adapters in the same system, IRQ
* sharing, some driver statistics, and maps highest SCSI id to sda.
* All cards are auto-detected.
*
* Assumptions: TMC-1800/18C50/18C30, BIOS >= 3.4
*
* LILO command-line options:
* fd_mcs=<FIFO_COUNT>[,<FIFO_SIZE>]
*
* ********************************************************
* Please see Copyrights/Comments in fdomain.* for credits.
* Following is from fdomain.c for acknowledgement:
*
* Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
* Revised: Wed Oct 2 11:10:55 1996 by r.faith@ieee.org
* Author: Rickard E. Faith, faith@cs.unc.edu
* Copyright 1992, 1993, 1994, 1995, 1996 Rickard E. Faith
*
* $Id: fdomain.c,v 5.45 1996/10/02 15:13:06 root Exp $
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
**************************************************************************
NOTES ON USER DEFINABLE OPTIONS:
DEBUG: This turns on the printing of various debug information.
ENABLE_PARITY: This turns on SCSI parity checking. With the current
driver, all attached devices must support SCSI parity. If none of your
devices support parity, then you can probably get the driver to work by
turning this option off. I have no way of testing this, however, and it
would appear that no one ever uses this option.
FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
the SCSI device, an interrupt will be raised. Therefore, this could be as
low as 0, or as high as 16. Note, however, that values which are too high
or too low seem to prevent any interrupts from occurring, and thereby lock
up the machine. I have found that 2 is a good number, but throughput may
be increased by changing this value to values which are close to 2.
Please let me know if you try any different values.
[*****Now a runtime option*****]
RESELECTION: This is no longer an option, since I gave up trying to
implement it in version 4.x of this driver. It did not improve
performance at all and made the driver unstable (because I never found one
of the two race conditions which were introduced by the multiple
outstanding command code). The instability seems a very high price to pay
just so that you don't have to wait for the tape to rewind. If you want
this feature implemented, send me patches. I'll be happy to send a copy
of my (broken) driver to anyone who would like to see a copy.
**************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/mca.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <scsi/scsicam.h>
#include <linux/mca-legacy.h>
#include <asm/io.h>
#include <asm/system.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#define DRIVER_VERSION "v0.2 by ZP Gu<zpg@castle.net>"
/* START OF USER DEFINABLE OPTIONS */
#define DEBUG 0 /* Enable debugging output */
#define ENABLE_PARITY 1 /* Enable SCSI Parity */
/* END OF USER DEFINABLE OPTIONS */
#if DEBUG
#define EVERY_ACCESS 0 /* Write a line on every scsi access */
#define ERRORS_ONLY 1 /* Only write a line if there is an error */
#define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
#define DEBUG_ABORT 1 /* Debug abort() routine */
#define DEBUG_RESET 1 /* Debug reset() routine */
#define DEBUG_RACE 1 /* Debug interrupt-driven race condition */
#else
#define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
#define ERRORS_ONLY 0
#define DEBUG_MESSAGES 0
#define DEBUG_ABORT 0
#define DEBUG_RESET 0
#define DEBUG_RACE 0
#endif
/* Errors are reported on the line, so we don't need to report them again */
#if EVERY_ACCESS
#undef ERRORS_ONLY
#define ERRORS_ONLY 0
#endif
#if ENABLE_PARITY
#define PARITY_MASK 0x08
#else
#define PARITY_MASK 0x00
#endif
/* Future Domain SCSI chip variants this driver can drive. */
enum chip_type {
	unknown = 0x00,
	tmc1800 = 0x01,
	tmc18c50 = 0x02,
	tmc18c30 = 0x03,
};
/* Bit flags describing the current phase/state of a SCSI command. */
enum {
	in_arbitration = 0x02,
	in_selection = 0x04,
	in_other = 0x08,
	disconnect = 0x10,
	aborted = 0x20,
	sent_ident = 0x40,
};
enum in_port_type {
Read_SCSI_Data = 0,
SCSI_Status = 1,
TMC_Status = 2,
FIFO_Status = 3, /* tmc18c50/tmc18c30 only */
Interrupt_Cond = 4, /* tmc18c50/tmc18c30 only */
LSB_ID_Code = 5,
MSB_ID_Code = 6,
Read_Loopback = 7,
SCSI_Data_NoACK = 8,
Interrupt_Status = 9,
Configuration1 = 10,
Configuration2 = 11, /* tmc18c50/tmc18c30 only */
Read_FIFO = 12,
FIFO_Data_Count = 14
};
enum out_port_type {
Write_SCSI_Data = 0,
SCSI_Cntl = 1,
Interrupt_Cntl = 2,
SCSI_Mode_Cntl = 3,
TMC_Cntl = 4,
Memory_Cntl = 5, /* tmc18c50/tmc18c30 only */
Write_Loopback = 7,
IO_Control = 11, /* tmc18c30 only */
Write_FIFO = 12
};
/*
 * Per-host private state, hung off Scsi_Host::hostdata and accessed
 * through the HOSTDATA()/accessor macros below.
 */
struct fd_hostdata {
	unsigned long _bios_base;	/* BIOS ROM base address (0 = no BIOS) */
	int _bios_major;
	int _bios_minor;
	volatile int _in_command;	/* non-zero while a command is in flight */
	Scsi_Cmnd *_current_SC;		/* the single outstanding command */
	enum chip_type _chip;
	int _adapter_mask;		/* 1 << this_id, used during (re)selection */
	int _fifo_count;		/* Number of 512 byte blocks before INTR */
	char _adapter_name[64];
#if DEBUG_RACE
	volatile int _in_interrupt_flag;
#endif
	/* cached per-host I/O port addresses (io_port + register offset) */
	int _SCSI_Mode_Cntl_port;
	int _FIFO_Data_Count_port;
	int _Interrupt_Cntl_port;
	int _Interrupt_Status_port;
	int _Interrupt_Cond_port;
	int _Read_FIFO_port;
	int _Read_SCSI_Data_port;
	int _SCSI_Cntl_port;
	int _SCSI_Data_NoACK_port;
	int _SCSI_Status_port;
	int _TMC_Cntl_port;
	int _TMC_Status_port;
	int _Write_FIFO_port;
	int _Write_SCSI_Data_port;
	int _FIFO_Size;			/* = 0x2000; 8k FIFO for
					   pre-tmc18c30 chips */
	/* simple stats */
	int _Bytes_Read;
	int _Bytes_Written;
	int _INTR_Processed;
};
#define FD_MAX_HOSTS 3 /* enough? */
#define HOSTDATA(shpnt) ((struct fd_hostdata *) shpnt->hostdata)
#define bios_base (HOSTDATA(shpnt)->_bios_base)
#define bios_major (HOSTDATA(shpnt)->_bios_major)
#define bios_minor (HOSTDATA(shpnt)->_bios_minor)
#define in_command (HOSTDATA(shpnt)->_in_command)
#define current_SC (HOSTDATA(shpnt)->_current_SC)
#define chip (HOSTDATA(shpnt)->_chip)
#define adapter_mask (HOSTDATA(shpnt)->_adapter_mask)
#define FIFO_COUNT (HOSTDATA(shpnt)->_fifo_count)
#define adapter_name (HOSTDATA(shpnt)->_adapter_name)
#if DEBUG_RACE
#define in_interrupt_flag (HOSTDATA(shpnt)->_in_interrupt_flag)
#endif
#define SCSI_Mode_Cntl_port (HOSTDATA(shpnt)->_SCSI_Mode_Cntl_port)
#define FIFO_Data_Count_port (HOSTDATA(shpnt)->_FIFO_Data_Count_port)
#define Interrupt_Cntl_port (HOSTDATA(shpnt)->_Interrupt_Cntl_port)
#define Interrupt_Status_port (HOSTDATA(shpnt)->_Interrupt_Status_port)
#define Interrupt_Cond_port (HOSTDATA(shpnt)->_Interrupt_Cond_port)
#define Read_FIFO_port (HOSTDATA(shpnt)->_Read_FIFO_port)
#define Read_SCSI_Data_port (HOSTDATA(shpnt)->_Read_SCSI_Data_port)
#define SCSI_Cntl_port (HOSTDATA(shpnt)->_SCSI_Cntl_port)
#define SCSI_Data_NoACK_port (HOSTDATA(shpnt)->_SCSI_Data_NoACK_port)
#define SCSI_Status_port (HOSTDATA(shpnt)->_SCSI_Status_port)
#define TMC_Cntl_port (HOSTDATA(shpnt)->_TMC_Cntl_port)
#define TMC_Status_port (HOSTDATA(shpnt)->_TMC_Status_port)
#define Write_FIFO_port (HOSTDATA(shpnt)->_Write_FIFO_port)
#define Write_SCSI_Data_port (HOSTDATA(shpnt)->_Write_SCSI_Data_port)
#define FIFO_Size (HOSTDATA(shpnt)->_FIFO_Size)
#define Bytes_Read (HOSTDATA(shpnt)->_Bytes_Read)
#define Bytes_Written (HOSTDATA(shpnt)->_Bytes_Written)
#define INTR_Processed (HOSTDATA(shpnt)->_INTR_Processed)
/* Static description of one supported MCA board (matched by POS id). */
struct fd_mcs_adapters_struct {
	char *name;		/* human-readable board name */
	int id;			/* MCA POS adapter id */
	enum chip_type fd_chip;	/* controller chip on this board */
	int fifo_size;		/* on-chip FIFO size in bytes */
	int fifo_count;		/* default 512-byte blocks before INTR */
};

#define REPLY_ID 0x5137

/* Boards probed by fd_mcs_detect(); the Reply card needs special POS
   decoding (see REPLY_ID handling in fd_mcs_detect). */
static struct fd_mcs_adapters_struct fd_mcs_adapters[] = {
	{"Future Domain SCSI Adapter MCS-700(18C50)",
	 0x60e9,
	 tmc18c50,
	 0x2000,
	 4},
	{"Future Domain SCSI Adapter MCS-600/700(TMC-1800)",
	 0x6127,
	 tmc1800,
	 0x2000,
	 4},
	{"Reply Sound Blaster/SCSI Adapter",
	 REPLY_ID,
	 tmc18c30,
	 0x800,
	 2},
};
#define FD_BRDS ARRAY_SIZE(fd_mcs_adapters)
static irqreturn_t fd_mcs_intr(int irq, void *dev_id);
static unsigned long addresses[] = { 0xc8000, 0xca000, 0xce000, 0xde000 };
static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
static unsigned short interrupts[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
/* host information */
static int found = 0;
static struct Scsi_Host *hosts[FD_MAX_HOSTS + 1] = { NULL };
static int user_fifo_count = 0;
static int user_fifo_size = 0;
#ifndef MODULE
/*
 * Boot-option parser: fd_mcs=FIFO_COUNT[,FIFO_SIZE]
 *
 * Stores user overrides in user_fifo_count/user_fifo_size, which
 * fd_mcs_detect() prefers over the per-board defaults.  Only the first
 * well-formed invocation is honoured.
 */
static int __init fd_mcs_setup(char *str)
{
	static int done_setup = 0;	/* accept the option only once */
	int ints[3];	/* ints[0] = number of values parsed, ints[1..] = values */

	get_options(str, 3, ints);
	/* reject repeats, wrong arity, or a FIFO count outside 1..16 */
	if (done_setup++ || ints[0] < 1 || ints[0] > 2 || ints[1] < 1 || ints[1] > 16) {
		printk("fd_mcs: usage: fd_mcs=FIFO_COUNT, FIFO_SIZE\n");
		return 0;
	}

	user_fifo_count = ints[0] >= 1 ? ints[1] : 0;	/* 0 = use board default */
	user_fifo_size = ints[0] >= 2 ? ints[2] : 0;	/* 0 = use board default */
	return 1;
}
__setup("fd_mcs=", fd_mcs_setup);
#endif /* !MODULE */
/*
 * Print a one-line identification banner for a freshly probed host:
 * BIOS location (if any), host SCSI id, chip type, IRQ and I/O base.
 */
static void print_banner(struct Scsi_Host *shpnt)
{
	const char *chip_name;

	/* Map the detected chip to its display name (was a nested ternary). */
	switch (chip) {
	case tmc1800:
		chip_name = "TMC-1800";
		break;
	case tmc18c50:
		chip_name = "TMC-18C50";
		break;
	case tmc18c30:
		chip_name = "TMC-18C30";
		break;
	default:
		chip_name = "Unknown";
		break;
	}

	printk("scsi%d <fd_mcs>: ", shpnt->host_no);

	if (bios_base)
		printk("BIOS at 0x%lX", bios_base);
	else
		printk("No BIOS");

	printk(", HostID %d, %s Chip, IRQ %d, IO 0x%lX\n", shpnt->this_id, chip_name, shpnt->irq, shpnt->io_port);
}
/*
 * do_pause - busy-wait for amount * 10 milliseconds.
 *
 * Fix: the old "do { mdelay(10); } while (--amount);" executed the body
 * at least once and, when called with amount == 0, the unsigned
 * pre-decrement wrapped to UINT_MAX and spun for weeks.  A while loop
 * with post-decrement pauses exactly amount times and is a no-op for 0.
 */
static void do_pause(unsigned amount)
{				/* Pause for amount*10 milliseconds */
	while (amount--)
		mdelay(10);
}
/*
 * Return the SCSI bus to the idle state: drop the bus drivers and mode
 * control, then re-arm TMC control with the configured parity setting.
 * The 18c50/18c30 variants additionally need bit 0x20 set to clear a
 * forced interrupt.
 */
static void fd_mcs_make_bus_idle(struct Scsi_Host *shpnt)
{
	outb(0, SCSI_Cntl_port);	/* disable bus drivers */
	outb(0, SCSI_Mode_Cntl_port);
	if (chip == tmc18c50 || chip == tmc18c30)
		outb(0x21 | PARITY_MASK, TMC_Cntl_port);	/* Clear forced intr. */
	else
		outb(0x01 | PARITY_MASK, TMC_Cntl_port);
}
static int fd_mcs_detect(struct scsi_host_template * tpnt)
{
int loop;
struct Scsi_Host *shpnt;
/* get id, port, bios, irq */
int slot;
u_char pos2, pos3, pos4;
int id, port, irq;
unsigned long bios;
/* if not MCA machine, return */
if (!MCA_bus)
return 0;
/* changeable? */
id = 7;
for (loop = 0; loop < FD_BRDS; loop++) {
slot = 0;
while (MCA_NOTFOUND != (slot = mca_find_adapter(fd_mcs_adapters[loop].id, slot))) {
/* if we get this far, an adapter has been detected and is
enabled */
printk(KERN_INFO "scsi <fd_mcs>: %s at slot %d\n", fd_mcs_adapters[loop].name, slot + 1);
pos2 = mca_read_stored_pos(slot, 2);
pos3 = mca_read_stored_pos(slot, 3);
pos4 = mca_read_stored_pos(slot, 4);
/* ready for next probe */
slot++;
if (fd_mcs_adapters[loop].id == REPLY_ID) { /* reply card */
static int reply_irq[] = { 10, 11, 14, 15 };
bios = 0; /* no bios */
if (pos2 & 0x2)
port = ports[pos4 & 0x3];
else
continue;
/* can't really disable it, same as irq=10 */
irq = reply_irq[((pos4 >> 2) & 0x1) + 2 * ((pos4 >> 4) & 0x1)];
} else {
bios = addresses[pos2 >> 6];
port = ports[(pos2 >> 4) & 0x03];
irq = interrupts[(pos2 >> 1) & 0x07];
}
if (irq) {
/* claim the slot */
mca_set_adapter_name(slot - 1, fd_mcs_adapters[loop].name);
/* check irq/region */
if (request_irq(irq, fd_mcs_intr, IRQF_SHARED, "fd_mcs", hosts)) {
printk(KERN_ERR "fd_mcs: interrupt is not available, skipping...\n");
continue;
}
/* request I/O region */
if (request_region(port, 0x10, "fd_mcs")) {
printk(KERN_ERR "fd_mcs: I/O region is already in use, skipping...\n");
continue;
}
/* register */
if (!(shpnt = scsi_register(tpnt, sizeof(struct fd_hostdata)))) {
printk(KERN_ERR "fd_mcs: scsi_register() failed\n");
release_region(port, 0x10);
free_irq(irq, hosts);
continue;
}
/* save name */
strcpy(adapter_name, fd_mcs_adapters[loop].name);
/* chip/fifo */
chip = fd_mcs_adapters[loop].fd_chip;
/* use boot time value if available */
FIFO_COUNT = user_fifo_count ? user_fifo_count : fd_mcs_adapters[loop].fifo_count;
FIFO_Size = user_fifo_size ? user_fifo_size : fd_mcs_adapters[loop].fifo_size;
/* FIXME: Do we need to keep this bit of code inside NOT_USED around at all? */
#ifdef NOT_USED
/* *************************************************** */
/* Try to toggle 32-bit mode. This only
works on an 18c30 chip. (User reports
say this works, so we should switch to
it in the near future.) */
outb(0x80, port + IO_Control);
if ((inb(port + Configuration2) & 0x80) == 0x80) {
outb(0x00, port + IO_Control);
if ((inb(port + Configuration2) & 0x80) == 0x00) {
chip = tmc18c30;
FIFO_Size = 0x800; /* 2k FIFO */
printk("FIRST: chip=%s, fifo_size=0x%x\n", (chip == tmc18c30) ? "tmc18c30" : "tmc18c50", FIFO_Size);
}
}
/* That should have worked, but appears to
have problems. Let's assume it is an
18c30 if the RAM is disabled. */
if (inb(port + Configuration2) & 0x02) {
chip = tmc18c30;
FIFO_Size = 0x800; /* 2k FIFO */
printk("SECOND: chip=%s, fifo_size=0x%x\n", (chip == tmc18c30) ? "tmc18c30" : "tmc18c50", FIFO_Size);
}
/* *************************************************** */
#endif
/* IBM/ANSI scsi scan ordering */
/* Stick this back in when the scsi.c changes are there */
shpnt->reverse_ordering = 1;
/* saving info */
hosts[found++] = shpnt;
shpnt->this_id = id;
shpnt->irq = irq;
shpnt->io_port = port;
shpnt->n_io_port = 0x10;
/* save */
bios_base = bios;
adapter_mask = (1 << id);
/* save more */
SCSI_Mode_Cntl_port = port + SCSI_Mode_Cntl;
FIFO_Data_Count_port = port + FIFO_Data_Count;
Interrupt_Cntl_port = port + Interrupt_Cntl;
Interrupt_Status_port = port + Interrupt_Status;
Interrupt_Cond_port = port + Interrupt_Cond;
Read_FIFO_port = port + Read_FIFO;
Read_SCSI_Data_port = port + Read_SCSI_Data;
SCSI_Cntl_port = port + SCSI_Cntl;
SCSI_Data_NoACK_port = port + SCSI_Data_NoACK;
SCSI_Status_port = port + SCSI_Status;
TMC_Cntl_port = port + TMC_Cntl;
TMC_Status_port = port + TMC_Status;
Write_FIFO_port = port + Write_FIFO;
Write_SCSI_Data_port = port + Write_SCSI_Data;
Bytes_Read = 0;
Bytes_Written = 0;
INTR_Processed = 0;
/* say something */
print_banner(shpnt);
/* reset */
outb(1, SCSI_Cntl_port);
do_pause(2);
outb(0, SCSI_Cntl_port);
do_pause(115);
outb(0, SCSI_Mode_Cntl_port);
outb(PARITY_MASK, TMC_Cntl_port);
/* done reset */
}
}
if (found == FD_MAX_HOSTS) {
printk("fd_mcs: detecting reached max=%d host adapters.\n", FD_MAX_HOSTS);
break;
}
}
return found;
}
/* Return the adapter name saved at detect time for this host. */
static const char *fd_mcs_info(struct Scsi_Host *shpnt)
{
	return adapter_name;
}
static int TOTAL_INTR = 0;
/*
* inout : decides on the direction of the dataflow and the meaning of the
* variables
* buffer: If inout==FALSE data is being written to it else read from it
* *start: If inout==FALSE start of the valid data in the buffer
* offset: If inout==FALSE offset from the beginning of the imaginary file
* from which we start writing into the buffer
* length: If inout==FALSE max number of bytes to be written into the buffer
* else number of bytes in the buffer
*/
/*
 * /proc read handler: format driver version, host name, FIFO settings and
 * the simple transfer statistics into the caller's buffer.  Writing is
 * not supported (-ENOSYS).  Follows the classic proc_info contract: see
 * the parameter description in the comment above.
 */
static int fd_mcs_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
{
	int len = 0;

	if (inout)
		return (-ENOSYS);	/* writes not supported */

	*start = buffer + offset;

	len += sprintf(buffer + len, "Future Domain MCS-600/700 Driver %s\n", DRIVER_VERSION);
	len += sprintf(buffer + len, "HOST #%d: %s\n", shpnt->host_no, adapter_name);
	len += sprintf(buffer + len, "FIFO Size=0x%x, FIFO Count=%d\n", FIFO_Size, FIFO_COUNT);
	len += sprintf(buffer + len, "DriverCalls=%d, Interrupts=%d, BytesRead=%d, BytesWrite=%d\n\n", TOTAL_INTR, INTR_Processed, Bytes_Read, Bytes_Written);

	/* clamp the result to the window [offset, offset + length) */
	if ((len -= offset) <= 0)
		return 0;
	if (len > length)
		len = length;
	return len;
}
/*
 * Manually (re)select a target: drive SELECT with our id and the target's
 * id on the bus, then poll up to 350 ms for the target to assert BSY.
 * Returns 0 on success (bus left enabled) or 1 on selection timeout (bus
 * made idle again).  Called from the interrupt handler as a second
 * attempt for slow devices.
 */
static int fd_mcs_select(struct Scsi_Host *shpnt, int target)
{
	int status;
	unsigned long timeout;

	outb(0x82, SCSI_Cntl_port);	/* Bus Enable + Select */
	outb(adapter_mask | (1 << target), SCSI_Data_NoACK_port);

	/* Stop arbitration and enable parity */
	outb(PARITY_MASK, TMC_Cntl_port);

	timeout = 350;	/* 350mS -- because of timeouts
			   (was 250mS) */
	do {
		status = inb(SCSI_Status_port);	/* Read adapter status */
		if (status & 1) {	/* Busy asserted */
			/* Enable SCSI Bus (on error, should make bus idle with 0) */
			outb(0x80, SCSI_Cntl_port);
			return 0;
		}
		udelay(1000);	/* wait one msec */
	} while (--timeout);

	/* Selection timed out: make bus idle again */
	fd_mcs_make_bus_idle(shpnt);
#if EVERY_ACCESS
	if (!target)
		printk("Selection failed\n");
#endif
#if ERRORS_ONLY
	if (!target) {
		static int flag = 0;

		if (!flag)	/* Skip first failure for all chips. */
			++flag;
		else
			printk("fd_mcs: Selection failed\n");
	}
#endif
	return 1;
}
/*
 * Complete the in-flight command: quiesce the chip, make the bus idle,
 * store the result and invoke the midlayer completion callback.  Callers
 * hold the host lock (see fd_mcs_intr/fd_mcs_abort).  Calling this with
 * no command outstanding is a driver bug -> panic.
 */
static void my_done(struct Scsi_Host *shpnt, int error)
{
	if (in_command) {
		in_command = 0;
		outb(0x00, Interrupt_Cntl_port);	/* mask chip interrupts */
		fd_mcs_make_bus_idle(shpnt);
		current_SC->result = error;
		current_SC->scsi_done(current_SC);
	} else {
		panic("fd_mcs: my_done() called outside of command\n");
	}
#if DEBUG_RACE
	in_interrupt_flag = 0;
#endif
}
/* only my_done needs to be protected */
/*
 * Shared interrupt handler: the whole command state machine lives here.
 *
 * Flow: find which of our hosts raised the interrupt (shared IRQ), then
 * advance the current command through its phases:
 *   in_arbitration -> start selection (or fail with DID_BUS_BUSY),
 *   in_selection   -> enable transfer phases (retrying slow targets via
 *                     fd_mcs_select, failing with DID_NO_CONNECT),
 *   in_other       -> service REQ (command/data/status/message bytes),
 *                     pump the data FIFO, and complete on MESSAGE IN 0
 *                     (COMMAND COMPLETE) via my_done().
 * Only the my_done() calls take the host lock (see comment above).
 */
static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
{
	unsigned long flags;
	int status;
	int done = 0;
	unsigned data_count, tmp_count;

	int i = 0;
	struct Scsi_Host *shpnt;

	TOTAL_INTR++;

	/* search for one adapter-response on shared interrupt */
	while ((shpnt = hosts[i++])) {
		if ((inb(TMC_Status_port)) & 1)
			break;
	}

	/* return if some other device on this IRQ caused the interrupt */
	if (!shpnt) {
		return IRQ_NONE;
	}

	INTR_Processed++;

	outb(0x00, Interrupt_Cntl_port);	/* mask further chip interrupts */

	/* Abort calls my_done, so we do nothing here. */
	if (current_SC->SCp.phase & aborted) {
#if DEBUG_ABORT
		printk("Interrupt after abort, ignoring\n");
#endif
		/* return IRQ_HANDLED; */
	}
#if DEBUG_RACE
	++in_interrupt_flag;
#endif

	if (current_SC->SCp.phase & in_arbitration) {
		status = inb(TMC_Status_port);	/* Read adapter status */
		if (!(status & 0x02)) {
			/* arbitration lost */
#if EVERY_ACCESS
			printk(" AFAIL ");
#endif
			spin_lock_irqsave(shpnt->host_lock, flags);
			my_done(shpnt, DID_BUS_BUSY << 16);
			spin_unlock_irqrestore(shpnt->host_lock, flags);
			return IRQ_HANDLED;
		}
		current_SC->SCp.phase = in_selection;

		outb(0x40 | FIFO_COUNT, Interrupt_Cntl_port);

		outb(0x82, SCSI_Cntl_port);	/* Bus Enable + Select */
		outb(adapter_mask | (1 << scmd_id(current_SC)), SCSI_Data_NoACK_port);

		/* Stop arbitration and enable parity */
		outb(0x10 | PARITY_MASK, TMC_Cntl_port);
#if DEBUG_RACE
		in_interrupt_flag = 0;
#endif
		return IRQ_HANDLED;
	} else if (current_SC->SCp.phase & in_selection) {
		status = inb(SCSI_Status_port);
		if (!(status & 0x01)) {
			/* Try again, for slow devices */
			if (fd_mcs_select(shpnt, scmd_id(current_SC))) {
#if EVERY_ACCESS
				printk(" SFAIL ");
#endif
				spin_lock_irqsave(shpnt->host_lock, flags);
				my_done(shpnt, DID_NO_CONNECT << 16);
				spin_unlock_irqrestore(shpnt->host_lock, flags);
				return IRQ_HANDLED;
			} else {
#if EVERY_ACCESS
				printk(" AltSel ");
#endif
				/* Stop arbitration and enable parity */
				outb(0x10 | PARITY_MASK, TMC_Cntl_port);
			}
		}
		current_SC->SCp.phase = in_other;
		outb(0x90 | FIFO_COUNT, Interrupt_Cntl_port);
		outb(0x80, SCSI_Cntl_port);
#if DEBUG_RACE
		in_interrupt_flag = 0;
#endif
		return IRQ_HANDLED;
	}

	/* current_SC->SCp.phase == in_other: this is the body of the routine */

	status = inb(SCSI_Status_port);

	if (status & 0x10) {	/* REQ */
		/* bits 1-3 of SCSI status encode the bus phase */
		switch (status & 0x0e) {

		case 0x08:	/* COMMAND OUT */
			outb(current_SC->cmnd[current_SC->SCp.sent_command++], Write_SCSI_Data_port);
#if EVERY_ACCESS
			printk("CMD = %x,", current_SC->cmnd[current_SC->SCp.sent_command - 1]);
#endif
			break;
		case 0x00:	/* DATA OUT -- tmc18c50/tmc18c30 only */
			if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
				current_SC->SCp.have_data_in = -1;
				outb(0xd0 | PARITY_MASK, TMC_Cntl_port);
			}
			break;
		case 0x04:	/* DATA IN -- tmc18c50/tmc18c30 only */
			if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
				current_SC->SCp.have_data_in = 1;
				outb(0x90 | PARITY_MASK, TMC_Cntl_port);
			}
			break;
		case 0x0c:	/* STATUS IN */
			current_SC->SCp.Status = inb(Read_SCSI_Data_port);
#if EVERY_ACCESS
			printk("Status = %x, ", current_SC->SCp.Status);
#endif
#if ERRORS_ONLY
			if (current_SC->SCp.Status && current_SC->SCp.Status != 2 && current_SC->SCp.Status != 8) {
				printk("ERROR fd_mcs: target = %d, command = %x, status = %x\n", current_SC->device->id, current_SC->cmnd[0], current_SC->SCp.Status);
			}
#endif
			break;
		case 0x0a:	/* MESSAGE OUT */
			outb(MESSAGE_REJECT, Write_SCSI_Data_port);	/* Reject */
			break;
		case 0x0e:	/* MESSAGE IN */
			current_SC->SCp.Message = inb(Read_SCSI_Data_port);
#if EVERY_ACCESS
			printk("Message = %x, ", current_SC->SCp.Message);
#endif
			/* message 0 == COMMAND COMPLETE -> finish below */
			if (!current_SC->SCp.Message)
				++done;
#if DEBUG_MESSAGES || EVERY_ACCESS
			if (current_SC->SCp.Message) {
				printk("fd_mcs: message = %x\n", current_SC->SCp.Message);
			}
#endif
			break;
		}
	}

	/* The tmc1800 cannot report the data direction itself, so once the
	   whole CDB has been sent we pick the FIFO direction from a table
	   of opcodes that require DATA OUT (see comment below). */
	if (chip == tmc1800 && !current_SC->SCp.have_data_in && (current_SC->SCp.sent_command >= current_SC->cmd_len)) {
		/* We have to get the FIFO direction
		   correct, so I've made a table based
		   on the SCSI Standard of which commands
		   appear to require a DATA OUT phase.
		 */
		/*
		   p. 94: Command for all device types
		   CHANGE DEFINITION            40 DATA OUT
		   COMPARE                      39 DATA OUT
		   COPY                         18 DATA OUT
		   COPY AND VERIFY              3a DATA OUT
		   INQUIRY                      12
		   LOG SELECT                   4c DATA OUT
		   LOG SENSE                    4d
		   MODE SELECT (6)              15 DATA OUT
		   MODE SELECT (10)             55 DATA OUT
		   MODE SENSE (6)               1a
		   MODE SENSE (10)              5a
		   READ BUFFER                  3c
		   RECEIVE DIAGNOSTIC RESULTS   1c
		   REQUEST SENSE                03
		   SEND DIAGNOSTIC              1d DATA OUT
		   TEST UNIT READY              00
		   WRITE BUFFER                 3b DATA OUT

		   p.178: Commands for direct-access devices (not listed on p. 94)
		   FORMAT UNIT                  04 DATA OUT
		   LOCK-UNLOCK CACHE            36
		   PRE-FETCH                    34
		   PREVENT-ALLOW MEDIUM REMOVAL 1e
		   READ (6)/RECEIVE             08
		   READ (10)                    3c
		   READ CAPACITY                25
		   READ DEFECT DATA (10)        37
		   READ LONG                    3e
		   REASSIGN BLOCKS              07 DATA OUT
		   RELEASE                      17
		   RESERVE                      16 DATA OUT
		   REZERO UNIT/REWIND           01
		   SEARCH DATA EQUAL (10)       31 DATA OUT
		   SEARCH DATA HIGH (10)        30 DATA OUT
		   SEARCH DATA LOW (10)         32 DATA OUT
		   SEEK (6)                     0b
		   SEEK (10)                    2b
		   SET LIMITS (10)              33
		   START STOP UNIT              1b
		   SYNCHRONIZE CACHE            35
		   VERIFY (10)                  2f
		   WRITE (6)/PRINT/SEND         0a DATA OUT
		   WRITE (10)/SEND              2a DATA OUT
		   WRITE AND VERIFY (10)        2e DATA OUT
		   WRITE LONG                   3f DATA OUT
		   WRITE SAME                   41 DATA OUT ?

		   p. 261: Commands for sequential-access devices (not previously listed)
		   ERASE                        19
		   LOAD UNLOAD                  1b
		   LOCATE                       2b
		   READ BLOCK LIMITS            05
		   READ POSITION                34
		   READ REVERSE                 0f
		   RECOVER BUFFERED DATA        14
		   SPACE                        11
		   WRITE FILEMARKS              10 ?

		   p. 298: Commands for printer devices (not previously listed)
		   ****** NOT SUPPORTED BY THIS DRIVER, since 0b is SEEK (6) *****
		   SLEW AND PRINT               0b DATA OUT  -- same as seek
		   STOP PRINT                   1b
		   SYNCHRONIZE BUFFER           10

		   p. 315: Commands for processor devices (not previously listed)

		   p. 321: Commands for write-once devices (not previously listed)
		   MEDIUM SCAN                  38
		   READ (12)                    a8
		   SEARCH DATA EQUAL (12)       b1 DATA OUT
		   SEARCH DATA HIGH (12)        b0 DATA OUT
		   SEARCH DATA LOW (12)         b2 DATA OUT
		   SET LIMITS (12)              b3
		   VERIFY (12)                  af
		   WRITE (12)                   aa DATA OUT
		   WRITE AND VERIFY (12)        ae DATA OUT

		   p. 332: Commands for CD-ROM devices (not previously listed)
		   PAUSE/RESUME                 4b
		   PLAY AUDIO (10)              45
		   PLAY AUDIO (12)              a5
		   PLAY AUDIO MSF               47
		   PLAY TRACK RELATIVE (10)     49
		   PLAY TRACK RELATIVE (12)     a9
		   READ HEADER                  44
		   READ SUB-CHANNEL             42
		   READ TOC                     43

		   p. 370: Commands for scanner devices (not previously listed)
		   GET DATA BUFFER STATUS       34
		   GET WINDOW                   25
		   OBJECT POSITION              31
		   SCAN                         1b
		   SET WINDOW                   24 DATA OUT

		   p. 391: Commands for optical memory devices (not listed)
		   ERASE (10)                   2c
		   ERASE (12)                   ac
		   MEDIUM SCAN                  38 DATA OUT
		   READ DEFECT DATA (12)        b7
		   READ GENERATION              29
		   READ UPDATED BLOCK           2d
		   UPDATE BLOCK                 3d DATA OUT

		   p. 419: Commands for medium changer devices (not listed)
		   EXCHANGE MEDIUM              46
		   INITIALIZE ELEMENT STATUS    07
		   MOVE MEDIUM                  a5
		   POSITION TO ELEMENT          2b
		   READ ELEMENT STATUS          b8
		   REQUEST VOL. ELEMENT ADDRESS b5
		   SEND VOLUME TAG              b6 DATA OUT

		   p. 454: Commands for communications devices (not listed previously)
		   GET MESSAGE (6)              08
		   GET MESSAGE (10)             28
		   GET MESSAGE (12)             a8
		 */

		switch (current_SC->cmnd[0]) {
		case CHANGE_DEFINITION:
		case COMPARE:
		case COPY:
		case COPY_VERIFY:
		case LOG_SELECT:
		case MODE_SELECT:
		case MODE_SELECT_10:
		case SEND_DIAGNOSTIC:
		case WRITE_BUFFER:

		case FORMAT_UNIT:
		case REASSIGN_BLOCKS:
		case RESERVE:
		case SEARCH_EQUAL:
		case SEARCH_HIGH:
		case SEARCH_LOW:
		case WRITE_6:
		case WRITE_10:
		case WRITE_VERIFY:
		case 0x3f:
		case 0x41:

		case 0xb1:
		case 0xb0:
		case 0xb2:
		case 0xaa:
		case 0xae:

		case 0x24:

		case 0x38:
		case 0x3d:

		case 0xb6:

		case 0xea:	/* alternate number for WRITE LONG */

			current_SC->SCp.have_data_in = -1;	/* DATA OUT */
			outb(0xd0 | PARITY_MASK, TMC_Cntl_port);
			break;

		case 0x00:
		default:

			current_SC->SCp.have_data_in = 1;	/* DATA IN */
			outb(0x90 | PARITY_MASK, TMC_Cntl_port);
			break;
		}
	}

	if (current_SC->SCp.have_data_in == -1) {	/* DATA OUT */
		/* keep the FIFO topped up while more than 512 bytes are free */
		while ((data_count = FIFO_Size - inw(FIFO_Data_Count_port)) > 512) {
#if EVERY_ACCESS
			printk("DC=%d, ", data_count);
#endif
			if (data_count > current_SC->SCp.this_residual)
				data_count = current_SC->SCp.this_residual;
			if (data_count > 0) {
#if EVERY_ACCESS
				printk("%d OUT, ", data_count);
#endif
				if (data_count == 1) {
					Bytes_Written++;

					outb(*current_SC->SCp.ptr++, Write_FIFO_port);
					--current_SC->SCp.this_residual;
				} else {
					data_count >>= 1;	/* word transfers */
					tmp_count = data_count << 1;
					outsw(Write_FIFO_port, current_SC->SCp.ptr, data_count);
					current_SC->SCp.ptr += tmp_count;
					Bytes_Written += tmp_count;
					current_SC->SCp.this_residual -= tmp_count;
				}
			}
			if (!current_SC->SCp.this_residual) {
				/* advance to the next scatter/gather segment */
				if (current_SC->SCp.buffers_residual) {
					--current_SC->SCp.buffers_residual;
					++current_SC->SCp.buffer;
					current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
					current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
				} else
					break;
			}
		}
	} else if (current_SC->SCp.have_data_in == 1) {	/* DATA IN */
		/* drain whatever the chip has accumulated in the FIFO */
		while ((data_count = inw(FIFO_Data_Count_port)) > 0) {
#if EVERY_ACCESS
			printk("DC=%d, ", data_count);
#endif
			if (data_count > current_SC->SCp.this_residual)
				data_count = current_SC->SCp.this_residual;
			if (data_count) {
#if EVERY_ACCESS
				printk("%d IN, ", data_count);
#endif
				if (data_count == 1) {
					Bytes_Read++;
					*current_SC->SCp.ptr++ = inb(Read_FIFO_port);
					--current_SC->SCp.this_residual;
				} else {
					data_count >>= 1;	/* Number of words */
					tmp_count = data_count << 1;
					insw(Read_FIFO_port, current_SC->SCp.ptr, data_count);
					current_SC->SCp.ptr += tmp_count;
					Bytes_Read += tmp_count;
					current_SC->SCp.this_residual -= tmp_count;
				}
			}
			/* advance to the next scatter/gather segment */
			if (!current_SC->SCp.this_residual && current_SC->SCp.buffers_residual) {
				--current_SC->SCp.buffers_residual;
				++current_SC->SCp.buffer;
				current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
				current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
			}
		}
	}

	if (done) {
#if EVERY_ACCESS
		printk(" ** IN DONE %d ** ", current_SC->SCp.have_data_in);
#endif

#if EVERY_ACCESS
		printk("BEFORE MY_DONE. . .");
#endif
		spin_lock_irqsave(shpnt->host_lock, flags);
		my_done(shpnt, (current_SC->SCp.Status & 0xff)
			| ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
		spin_unlock_irqrestore(shpnt->host_lock, flags);
#if EVERY_ACCESS
		printk("RETURNING.\n");
#endif
	} else {
		/* not finished -- re-arm the chip for the next interrupt */
		if (current_SC->SCp.phase & disconnect) {
			outb(0xd0 | FIFO_COUNT, Interrupt_Cntl_port);
			outb(0x00, SCSI_Cntl_port);
		} else {
			outb(0x90 | FIFO_COUNT, Interrupt_Cntl_port);
		}
	}
#if DEBUG_RACE
	in_interrupt_flag = 0;
#endif
	return IRQ_HANDLED;
}
/*
 * fd_mcs_release - tear down one host: release its I/O region, free the
 * IRQ if no other registered host shares it, and compact the hosts[]
 * table.
 *
 * Fix: if shpnt was somehow not present in hosts[], this_host stayed -1
 * and the compaction loop started at hosts[-1], writing out of bounds.
 * We now bail out before touching the table in that case.
 */
static int fd_mcs_release(struct Scsi_Host *shpnt)
{
	int i, this_host, irq_usage;

	release_region(shpnt->io_port, shpnt->n_io_port);

	this_host = -1;	/* index of shpnt in hosts[], -1 = not found */
	irq_usage = 0;	/* how many registered hosts share this IRQ */
	for (i = 0; i < found; i++) {
		if (shpnt == hosts[i])
			this_host = i;
		if (shpnt->irq == hosts[i]->irq)
			irq_usage++;
	}

	/* only for the last host on this IRQ */
	if (1 == irq_usage)
		free_irq(shpnt->irq, hosts);

	/* host not registered in hosts[] -- nothing to compact */
	if (this_host < 0)
		return 0;

	found--;
	for (i = this_host; i < found; i++)
		hosts[i] = hosts[i + 1];
	hosts[found] = NULL;

	return 0;
}
/*
 * Queue one command (locked variant, wrapped by DEF_SCSI_QCMD below).
 * The driver handles exactly one command per host (can_queue = 1), so a
 * second command while one is in flight is a midlayer bug -> panic.
 * Initializes the SCp scatter/gather bookkeeping and starts bus
 * arbitration; everything after that is driven from fd_mcs_intr().
 */
static int fd_mcs_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
	struct Scsi_Host *shpnt = SCpnt->device->host;

	if (in_command) {
		panic("fd_mcs: fd_mcs_queue() NOT REENTRANT!\n");
	}
#if EVERY_ACCESS
	printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
	       SCpnt->target, *(unsigned char *) SCpnt->cmnd,
	       scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
#endif

	fd_mcs_make_bus_idle(shpnt);

	SCpnt->scsi_done = done;	/* Save this for the done function */
	current_SC = SCpnt;

	/* Initialize scatter/gather state; NULL pointers for no-data commands */
	if (scsi_bufflen(current_SC)) {
		current_SC->SCp.buffer = scsi_sglist(current_SC);
		current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
		current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
		current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
	} else {
		current_SC->SCp.ptr = NULL;
		current_SC->SCp.this_residual = 0;
		current_SC->SCp.buffer = NULL;
		current_SC->SCp.buffers_residual = 0;
	}

	current_SC->SCp.Status = 0;
	current_SC->SCp.Message = 0;
	current_SC->SCp.have_data_in = 0;	/* 0 = unknown, 1 = DATA IN, -1 = DATA OUT */
	current_SC->SCp.sent_command = 0;
	current_SC->SCp.phase = in_arbitration;

	/* Start arbitration */
	outb(0x00, Interrupt_Cntl_port);
	outb(0x00, SCSI_Cntl_port);	/* Disable data drivers */
	outb(adapter_mask, SCSI_Data_NoACK_port);	/* Set our id bit */
	in_command = 1;
	outb(0x20, Interrupt_Cntl_port);
	outb(0x14 | PARITY_MASK, TMC_Cntl_port);	/* Start arbitration */

	return 0;
}
static DEF_SCSI_QCMD(fd_mcs_queue)
#if DEBUG_ABORT || DEBUG_RESET
/*
 * Debug helper (DEBUG_ABORT/DEBUG_RESET only): dump the command state,
 * the PIC mask/request/service registers and the adapter registers.
 *
 * Fixes: the old code dereferenced SCpnt->host to initialize shpnt
 * BEFORE the NULL checks, and the "cannot provide information" branch
 * fell through into those same dereferences instead of returning.
 */
static void fd_mcs_print_info(Scsi_Cmnd * SCpnt)
{
	unsigned int imr;
	unsigned int irr;
	unsigned int isr;
	struct Scsi_Host *shpnt;

	/* validate before any dereference, and actually bail out */
	if (!SCpnt || !SCpnt->host) {
		printk("fd_mcs: cannot provide detailed information\n");
		return;
	}
	shpnt = SCpnt->host;

	printk("%s\n", fd_mcs_info(SCpnt->host));
	print_banner(SCpnt->host);
	switch (SCpnt->SCp.phase) {
	case in_arbitration:
		printk("arbitration ");
		break;
	case in_selection:
		printk("selection ");
		break;
	case in_other:
		printk("other ");
		break;
	default:
		printk("unknown ");
		break;
	}

	printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
	       SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd,
	       scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
	printk("sent_command = %d, have_data_in = %d, timeout = %d\n", SCpnt->SCp.sent_command, SCpnt->SCp.have_data_in, SCpnt->timeout);
#if DEBUG_RACE
	printk("in_interrupt_flag = %d\n", in_interrupt_flag);
#endif

	/* read the 8259 cascade: mask, request and in-service registers */
	imr = (inb(0x0a1) << 8) + inb(0x21);
	outb(0x0a, 0xa0);	/* Get the IRR */
	irr = inb(0xa0) << 8;
	outb(0x0a, 0x20);
	irr += inb(0x20);
	outb(0x0b, 0xa0);	/* Get the ISR */
	isr = inb(0xa0) << 8;
	outb(0x0b, 0x20);
	isr += inb(0x20);

	/* Print out interesting information */
	printk("IMR = 0x%04x", imr);
	if (imr & (1 << shpnt->irq))
		printk(" (masked)");
	printk(", IRR = 0x%04x, ISR = 0x%04x\n", irr, isr);

	printk("SCSI Status      = 0x%02x\n", inb(SCSI_Status_port));
	printk("TMC Status       = 0x%02x", inb(TMC_Status_port));
	if (inb(TMC_Status_port) & 1)
		printk(" (interrupt)");
	printk("\n");
	printk("Interrupt Status = 0x%02x", inb(Interrupt_Status_port));
	if (inb(Interrupt_Status_port) & 0x08)
		printk(" (enabled)");
	printk("\n");
	if (chip == tmc18c50 || chip == tmc18c30) {
		printk("FIFO Status      = 0x%02x\n", inb(shpnt->io_port + FIFO_Status));
		printk("Int. Condition   = 0x%02x\n", inb(shpnt->io_port + Interrupt_Cond));
	}
	printk("Configuration 1  = 0x%02x\n", inb(shpnt->io_port + Configuration1));
	if (chip == tmc18c50 || chip == tmc18c30)
		printk("Configuration 2  = 0x%02x\n", inb(shpnt->io_port + Configuration2));
}
#endif
/*
 * eh_abort_handler: abort the single in-flight command under the host
 * lock.  Marks the command aborted, quiesces the bus and completes it
 * with DID_ABORT.  Returns FAILED when no command is outstanding.
 */
static int fd_mcs_abort(Scsi_Cmnd * SCpnt)
{
	struct Scsi_Host *shpnt = SCpnt->device->host;

	unsigned long flags;
#if EVERY_ACCESS || ERRORS_ONLY || DEBUG_ABORT
	printk("fd_mcs: abort ");
#endif

	spin_lock_irqsave(shpnt->host_lock, flags);
	if (!in_command) {
#if EVERY_ACCESS || ERRORS_ONLY
		printk(" (not in command)\n");
#endif
		spin_unlock_irqrestore(shpnt->host_lock, flags);
		return FAILED;
	} else
		printk("\n");

#if DEBUG_ABORT
	fd_mcs_print_info(SCpnt);
#endif

	fd_mcs_make_bus_idle(shpnt);

	/* flag the abort so a late interrupt is ignored (see fd_mcs_intr) */
	current_SC->SCp.phase |= aborted;

	current_SC->result = DID_ABORT << 16;

	/* Aborts are not done well. . . */
	my_done(shpnt, DID_ABORT << 16);

	spin_unlock_irqrestore(shpnt->host_lock, flags);
	return SUCCESS;
}
/*
 * eh_bus_reset_handler: pulse SCSI RST (~20 ms), wait ~1.15 s for
 * devices to recover, then re-arm the chip.  Note this dereferences
 * SCpnt->device->host unconditionally, so despite the trailing comment
 * it is never actually called with SCpnt == NULL.
 */
static int fd_mcs_bus_reset(Scsi_Cmnd * SCpnt) {
	struct Scsi_Host *shpnt = SCpnt->device->host;
	unsigned long flags;

#if DEBUG_RESET
	static int called_once = 0;	/* skip the dump on the very first reset */
#endif

#if ERRORS_ONLY
	if (SCpnt)
		printk("fd_mcs: SCSI Bus Reset\n");
#endif

#if DEBUG_RESET
	if (called_once)
		fd_mcs_print_info(current_SC);
	called_once = 1;
#endif

	spin_lock_irqsave(shpnt->host_lock, flags);

	outb(1, SCSI_Cntl_port);	/* assert RST */
	do_pause(2);
	outb(0, SCSI_Cntl_port);	/* release RST */
	do_pause(115);			/* give targets time to recover */
	outb(0, SCSI_Mode_Cntl_port);
	outb(PARITY_MASK, TMC_Cntl_port);

	spin_unlock_irqrestore(shpnt->host_lock, flags);

	/* Unless this is the very first call (i.e., SCPnt == NULL), everything
	   is probably hosed at this point.  We will, however, try to keep
	   things going by informing the high-level code that we need help. */
	return SUCCESS;
}
#include <scsi/scsi_ioctl.h>
/*
 * bios_param handler: compute a BIOS-compatible heads/sectors/cylinders
 * geometry for the drive.  If a valid, populated partition table is on
 * the disk, trust its ending head/sector fields (Future Domain's own
 * algorithm); otherwise pick a geometry tier by capacity.
 * info_array[] receives { heads, sectors, cylinders }.
 * NOTE(review): capacity is narrowed to int; for drives past the ~1 TB
 * 31-bit sector mark this truncates -- acceptable for BIOS-era geometry.
 */
static int fd_mcs_biosparam(struct scsi_device * disk, struct block_device *bdev,
			    sector_t capacity, int *info_array)
{
	unsigned char *p = scsi_bios_ptable(bdev);	/* kmalloc'd copy; freed below */
	int size = capacity;

	/* BIOS >= 3.4 for MCA cards */
	/* This algorithm was provided by Future Domain (much thanks!). */

	if (p && p[65] == 0xaa && p[64] == 0x55	/* Partition table valid */
	    && p[4]) {	/* Partition type */

		/* The partition table layout is as follows:

		   Start: 0x1b3h
		   Offset: 0 = partition status
		   1 = starting head
		   2 = starting sector and cylinder (word, encoded)
		   4 = partition type
		   5 = ending head
		   6 = ending sector and cylinder (word, encoded)
		   8 = starting absolute sector (double word)
		   c = number of sectors (double word)
		   Signature: 0x1fe = 0x55aa

		   So, this algorithm assumes:
		   1) the first partition table is in use,
		   2) the data in the first entry is correct, and
		   3) partitions never divide cylinders

		   Note that (1) may be FALSE for NetBSD (and other BSD flavors),
		   as well as for Linux.  Note also, that Linux doesn't pay any
		   attention to the fields that are used by this algorithm -- it
		   only uses the absolute sector data.  Recent versions of Linux's
		   fdisk(1) will fill this data in correctly, and forthcoming
		   versions will check for consistency.

		   Checking for a non-zero partition type is not part of the
		   Future Domain algorithm, but it seemed to be a reasonable thing
		   to do, especially in the Linux and BSD worlds. */

		info_array[0] = p[5] + 1;	/* heads */
		info_array[1] = p[6] & 0x3f;	/* sectors */
	} else {
		/* Note that this new method guarantees that there will always be
		   less than 1024 cylinders on a platter.  This is good for drives
		   up to approximately 7.85GB (where 1GB = 1024 * 1024 kB). */

		if ((unsigned int) size >= 0x7e0000U)
		{
			info_array[0] = 0xff;	/* heads   = 255 */
			info_array[1] = 0x3f;	/* sectors =  63 */
		} else if ((unsigned int) size >= 0x200000U) {
			info_array[0] = 0x80;	/* heads   = 128 */
			info_array[1] = 0x3f;	/* sectors =  63 */
		} else {
			info_array[0] = 0x40;	/* heads   =  64 */
			info_array[1] = 0x20;	/* sectors =  32 */
		}
	}

	/* For both methods, compute the cylinders */
	info_array[2] = (unsigned int) size / (info_array[0] * info_array[1]);
	kfree(p);
	return 0;
}
/* Old-style (detect/release) SCSI host template; scsi_module.c below
   turns it into module init/exit.  One command at a time, no request
   clustering. */
static struct scsi_host_template driver_template = {
	.proc_name = "fd_mcs",
	.proc_info = fd_mcs_proc_info,
	.detect = fd_mcs_detect,
	.release = fd_mcs_release,
	.info = fd_mcs_info,
	.queuecommand = fd_mcs_queue,
	.eh_abort_handler = fd_mcs_abort,
	.eh_bus_reset_handler = fd_mcs_bus_reset,
	.bios_param = fd_mcs_biosparam,
	.can_queue = 1,		/* driver is strictly single-command */
	.this_id = 7,
	.sg_tablesize = 64,
	.cmd_per_lun = 1,
	.use_clustering = DISABLE_CLUSTERING,
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");
| gpl-2.0 |
wangenau/samsung-kernel-msm7x30 | sound/soc/msm/msm-dai-stub.c | 3340 | 2554 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
/* Stub channel-map callback: accept any mapping, report success. */
static int msm_dai_stub_set_channel_map(struct snd_soc_dai *dai,
		unsigned int tx_num, unsigned int *tx_slot,
		unsigned int rx_num, unsigned int *rx_slot)
{
	pr_debug("%s:\n", __func__);

	return 0;
}
/* Only the channel-map op is provided; everything else uses defaults. */
static struct snd_soc_dai_ops msm_dai_stub_ops = {
	.set_channel_map = msm_dai_stub_set_channel_map,
};
/* Stub DAI capabilities: S16_LE, 1-2 channels, 8/16/48 kHz for both
   playback and capture. */
static struct snd_soc_dai_driver msm_dai_stub_dai = {
	.playback = {
		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
		SNDRV_PCM_RATE_16000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
		.channels_min = 1,
		.channels_max = 2,
		.rate_min = 8000,
		.rate_max = 48000,
	},
	.capture = {
		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
		SNDRV_PCM_RATE_16000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
		.channels_min = 1,
		.channels_max = 2,
		.rate_min = 8000,
		.rate_max = 48000,
	},
	.ops = &msm_dai_stub_ops,
};
/* Register the stub DAI for this platform device; propagate the
   registration result directly instead of staging it in a local. */
static __devinit int msm_dai_stub_dev_probe(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));

	return snd_soc_register_dai(&pdev->dev, &msm_dai_stub_dai);
}
/*
 * Platform-device remove: unregister the stub DAI registered at probe time.
 * snd_soc_unregister_dai() has no failure mode here, so always returns 0.
 */
static __devexit int msm_dai_stub_dev_remove(struct platform_device *pdev)
{
	pr_debug("%s:\n", __func__);
	snd_soc_unregister_dai(&pdev->dev);
	return 0;
}
/*
 * Platform-driver glue.  The .name must match the platform device created
 * by the board/DT code ("msm-dai-stub") for probe() to be called.
 *
 * Fix: msm_dai_stub_dev_remove is annotated __devexit, so the reference
 * must go through __devexit_p().  Without it, a build with CONFIG_HOTPLUG
 * disabled discards the function section while this pointer still names
 * it, causing a section mismatch / link failure.
 */
static struct platform_driver msm_dai_stub_driver = {
	.probe = msm_dai_stub_dev_probe,
	.remove = __devexit_p(msm_dai_stub_dev_remove),
	.driver = {
		.name = "msm-dai-stub",
		.owner = THIS_MODULE,
	},
};
/*
 * Module entry point: register the platform driver.  Probe runs later,
 * when (and if) a matching "msm-dai-stub" platform device appears.
 */
static int __init msm_dai_stub_init(void)
{
	pr_debug("%s:\n", __func__);
	return platform_driver_register(&msm_dai_stub_driver);
}
module_init(msm_dai_stub_init);
/*
 * Module exit point: unregister the platform driver, which also detaches
 * any bound devices (invoking remove() for each).
 */
static void __exit msm_dai_stub_exit(void)
{
	pr_debug("%s:\n", __func__);
	platform_driver_unregister(&msm_dai_stub_driver);
}
module_exit(msm_dai_stub_exit);
/* Module information */
MODULE_DESCRIPTION("MSM Stub DSP DAI driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jbats/android_kernel_motorola_msm8992 | arch/mips/lantiq/xway/prom.c | 4108 | 2489 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
#include <linux/export.h>
#include <linux/clk.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <lantiq_soc.h>
#include "../prom.h"
#define SOC_DANUBE "Danube"
#define SOC_TWINPASS "Twinpass"
#define SOC_AMAZON_SE "Amazon_SE"
#define SOC_AR9 "AR9"
#define SOC_GR9 "GR9"
#define SOC_VR9 "VR9"
#define COMP_DANUBE "lantiq,danube"
#define COMP_TWINPASS "lantiq,twinpass"
#define COMP_AMAZON_SE "lantiq,ase"
#define COMP_AR9 "lantiq,ar9"
#define COMP_GR9 "lantiq,gr9"
#define COMP_VR9 "lantiq,vr9"
#define PART_SHIFT 12
#define PART_MASK 0x0FFFFFFF
#define REV_SHIFT 28
#define REV_MASK 0xF0000000
/*
 * Identify the Lantiq SoC from the MPS chip-ID register and fill in the
 * ltq_soc_info structure (part number, revision string, family name/type
 * and DT compatible string).
 *
 * Fix: the original read the LTQ_MPS_CHIPID MMIO register twice, once per
 * extracted field.  Read it once into a local -- cheaper, and both fields
 * are now guaranteed to come from the same register sample.
 *
 * Panics via unreachable() on an unknown part number: booting further
 * without knowing the SoC would be meaningless.
 */
void __init ltq_soc_detect(struct ltq_soc_info *i)
{
	u32 chipid = ltq_r32(LTQ_MPS_CHIPID);

	i->partnum = (chipid & PART_MASK) >> PART_SHIFT;
	i->rev = (chipid & REV_MASK) >> REV_SHIFT;
	sprintf(i->rev_type, "1.%d", i->rev);
	switch (i->partnum) {
	case SOC_ID_DANUBE1:
	case SOC_ID_DANUBE2:
		i->name = SOC_DANUBE;
		i->type = SOC_TYPE_DANUBE;
		i->compatible = COMP_DANUBE;
		break;
	case SOC_ID_TWINPASS:
		i->name = SOC_TWINPASS;
		i->type = SOC_TYPE_DANUBE;
		i->compatible = COMP_TWINPASS;
		break;
	case SOC_ID_ARX188:
	case SOC_ID_ARX168_1:
	case SOC_ID_ARX168_2:
	case SOC_ID_ARX182:
		i->name = SOC_AR9;
		i->type = SOC_TYPE_AR9;
		i->compatible = COMP_AR9;
		break;
	case SOC_ID_GRX188:
	case SOC_ID_GRX168:
		i->name = SOC_GR9;
		i->type = SOC_TYPE_AR9;
		i->compatible = COMP_GR9;
		break;
	case SOC_ID_AMAZON_SE_1:
	case SOC_ID_AMAZON_SE_2:
#ifdef CONFIG_PCI
		/* Amazon-SE has no PCI; a PCI-enabled kernel cannot run. */
		panic("ase is only supported for non pci kernels");
#endif
		i->name = SOC_AMAZON_SE;
		i->type = SOC_TYPE_AMAZON_SE;
		i->compatible = COMP_AMAZON_SE;
		break;
	case SOC_ID_VRX282:
	case SOC_ID_VRX268:
	case SOC_ID_VRX288:
		i->name = SOC_VR9;
		i->type = SOC_TYPE_VR9;
		i->compatible = COMP_VR9;
		break;
	case SOC_ID_GRX268:
	case SOC_ID_GRX288:
		i->name = SOC_GR9;
		i->type = SOC_TYPE_VR9;
		i->compatible = COMP_GR9;
		break;
	case SOC_ID_VRX268_2:
	case SOC_ID_VRX288_2:
		i->name = SOC_VR9;
		i->type = SOC_TYPE_VR9_2;
		i->compatible = COMP_VR9;
		break;
	case SOC_ID_GRX282_2:
	case SOC_ID_GRX288_2:
		i->name = SOC_GR9;
		i->type = SOC_TYPE_VR9_2;
		i->compatible = COMP_GR9;
		break;
	default:
		unreachable();
		break;
	}
}
| gpl-2.0 |
keyser84/android_kernel_motorola_msm8226 | arch/powerpc/kvm/book3s_exports.c | 4876 | 1116 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright SUSE Linux Products GmbH 2009
*
* Authors: Alexander Graf <agraf@suse.de>
*/
#include <linux/export.h>
#include <asm/kvm_book3s.h>
/* Export the low-level assembly entry/helper symbols that the modular
 * Book3S KVM back-ends need.  HV and PR builds use different entry
 * trampolines, hence the #ifdef split. */
#ifdef CONFIG_KVM_BOOK3S_64_HV
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
#else
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
#endif
#ifdef CONFIG_VSX
EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
#endif
#endif
| gpl-2.0 |
ruslan250283/alcatel_6036 | drivers/media/video/cx23885/cx23885-cards.c | 4876 | 45692 | /*
* Driver for the Conexant CX23885 PCIe bridge
*
* Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <media/cx25840.h>
#include <linux/firmware.h>
#include <misc/altera.h>
#include "cx23885.h"
#include "tuner-xc2028.h"
#include "netup-eeprom.h"
#include "netup-init.h"
#include "altera-ci.h"
#include "xc4000.h"
#include "xc5000.h"
#include "cx23888-ir.h"
/* Board revision for the NetUP Dual DVB-T/C CI card; selects between
 * hardware revisions that need different setup.  Defaults to rev 1. */
static unsigned int netup_card_rev = 1;
module_param(netup_card_rev, int, 0644);
MODULE_PARM_DESC(netup_card_rev,
		"NetUP Dual DVB-T/C CI card revision");
/* Opt-in switch for the integrated CX2388[57] IR controller; disabled by
 * default because it can cause an interrupt storm on some boards. */
static unsigned int enable_885_ir;
module_param(enable_885_ir, int, 0644);
MODULE_PARM_DESC(enable_885_ir,
		"Enable integrated IR controller for supported\n"
		"\t\t CX2388[57] boards that are wired for it:\n"
		"\t\t\tHVR-1250 (reported safe)\n"
		"\t\t\tTeVii S470 (reported unsafe)\n"
		"\t\t This can cause an interrupt storm with some cards.\n"
		"\t\t Default: 0 [Disabled]");
/* ------------------------------------------------------------------ */
/* board config info */
/*
 * Static per-board configuration table, indexed by the CX23885_BOARD_*
 * identifiers.  Each entry describes which transport ports carry what
 * (analog video, MPEG encoder, DVB), the tuner type/address, and the
 * analog input mux routing for that board.  Entries here are matched to
 * hardware via the PCI subsystem-ID table (cx23885_subids) below.
 */
struct cx23885_board cx23885_boards[] = {
	[CX23885_BOARD_UNKNOWN] = {
		.name		= "UNKNOWN/GENERIC",
		/* Ensure safe default for unknown boards */
		.clk_freq	= 0,
		.input          = {{
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   = 0,
		}, {
			.type   = CX23885_VMUX_COMPOSITE2,
			.vmux   = 1,
		}, {
			.type   = CX23885_VMUX_COMPOSITE3,
			.vmux   = 2,
		}, {
			.type   = CX23885_VMUX_COMPOSITE4,
			.vmux   = 3,
		} },
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1800lp] = {
		.name		= "Hauppauge WinTV-HVR1800lp",
		.portc		= CX23885_MPEG_DVB,
		.input          = {{
			.type   = CX23885_VMUX_TELEVISION,
			.vmux   = 0,
			.gpio0  = 0xff00,
		}, {
			.type   = CX23885_VMUX_DEBUG,
			.vmux   = 0,
			.gpio0  = 0xff01,
		}, {
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   = 1,
			.gpio0  = 0xff02,
		}, {
			.type   = CX23885_VMUX_SVIDEO,
			.vmux   = 2,
			.gpio0  = 0xff02,
		} },
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1800] = {
		.name		= "Hauppauge WinTV-HVR1800",
		.porta		= CX23885_ANALOG_VIDEO,
		.portb		= CX23885_MPEG_ENCODER,
		.portc		= CX23885_MPEG_DVB,
		.tuner_type	= TUNER_PHILIPS_TDA8290,
		.tuner_addr	= 0x42, /* 0x84 >> 1 */
		.tuner_bus	= 1,
		.input          = {{
			.type   = CX23885_VMUX_TELEVISION,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN5_CH2 |
					CX25840_VIN2_CH1,
			.amux   = CX25840_AUDIO8,
			.gpio0  = 0,
		}, {
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN4_CH2 |
					CX25840_VIN6_CH1,
			.amux   = CX25840_AUDIO7,
			.gpio0  = 0,
		}, {
			.type   = CX23885_VMUX_SVIDEO,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN4_CH2 |
					CX25840_VIN8_CH1 |
					CX25840_SVIDEO_ON,
			.amux   = CX25840_AUDIO7,
			.gpio0  = 0,
		} },
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1250] = {
		.name		= "Hauppauge WinTV-HVR1250",
		.portc		= CX23885_MPEG_DVB,
		.input          = {{
			.type   = CX23885_VMUX_TELEVISION,
			.vmux   = 0,
			.gpio0  = 0xff00,
		}, {
			.type   = CX23885_VMUX_DEBUG,
			.vmux   = 0,
			.gpio0  = 0xff01,
		}, {
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   = 1,
			.gpio0  = 0xff02,
		}, {
			.type   = CX23885_VMUX_SVIDEO,
			.vmux   = 2,
			.gpio0  = 0xff02,
		} },
	},
	[CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP] = {
		.name		= "DViCO FusionHDTV5 Express",
		.portb		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1500Q] = {
		.name		= "Hauppauge WinTV-HVR1500Q",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1500] = {
		.name		= "Hauppauge WinTV-HVR1500",
		.porta		= CX23885_ANALOG_VIDEO,
		.portc		= CX23885_MPEG_DVB,
		.tuner_type	= TUNER_XC2028,
		.tuner_addr	= 0x61, /* 0xc2 >> 1 */
		.input          = {{
			.type   = CX23885_VMUX_TELEVISION,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN5_CH2 |
					CX25840_VIN2_CH1,
			.gpio0  = 0,
		}, {
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN4_CH2 |
					CX25840_VIN6_CH1,
			.gpio0  = 0,
		}, {
			.type   = CX23885_VMUX_SVIDEO,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN4_CH2 |
					CX25840_VIN8_CH1 |
					CX25840_SVIDEO_ON,
			.gpio0  = 0,
		} },
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1200] = {
		.name		= "Hauppauge WinTV-HVR1200",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1700] = {
		.name		= "Hauppauge WinTV-HVR1700",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1400] = {
		.name		= "Hauppauge WinTV-HVR1400",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP] = {
		.name		= "DViCO FusionHDTV7 Dual Express",
		.portb		= CX23885_MPEG_DVB,
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP] = {
		.name		= "DViCO FusionHDTV DVB-T Dual Express",
		.portb		= CX23885_MPEG_DVB,
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H] = {
		.name		= "Leadtek Winfast PxDVR3200 H",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = {
		.name		= "Leadtek Winfast PxDVR3200 H XC4000",
		.porta		= CX23885_ANALOG_VIDEO,
		.portc		= CX23885_MPEG_DVB,
		.tuner_type	= TUNER_XC4000,
		.tuner_addr	= 0x61,
		.radio_type	= UNSET,
		.radio_addr	= ADDR_UNSET,
		.input		= {{
			.type	= CX23885_VMUX_TELEVISION,
			.vmux	= CX25840_VIN2_CH1 |
				  CX25840_VIN5_CH2 |
				  CX25840_NONE0_CH3,
		}, {
			.type	= CX23885_VMUX_COMPOSITE1,
			.vmux	= CX25840_COMPOSITE1,
		}, {
			.type	= CX23885_VMUX_SVIDEO,
			.vmux	= CX25840_SVIDEO_LUMA3 |
				  CX25840_SVIDEO_CHROMA4,
		}, {
			.type	= CX23885_VMUX_COMPONENT,
			.vmux	= CX25840_VIN7_CH1 |
				  CX25840_VIN6_CH2 |
				  CX25840_VIN8_CH3 |
				  CX25840_COMPONENT_ON,
		} },
	},
	[CX23885_BOARD_COMPRO_VIDEOMATE_E650F] = {
		.name		= "Compro VideoMate E650F",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_TBS_6920] = {
		.name		= "TurboSight TBS 6920",
		.portb		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_TEVII_S470] = {
		.name		= "TeVii S470",
		.portb		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_DVBWORLD_2005] = {
		.name		= "DVBWorld DVB-S2 2005",
		.portb		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_NETUP_DUAL_DVBS2_CI] = {
		.ci_type	= 1,
		.name		= "NetUP Dual DVB-S2 CI",
		.portb		= CX23885_MPEG_DVB,
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1270] = {
		.name		= "Hauppauge WinTV-HVR1270",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1275] = {
		.name		= "Hauppauge WinTV-HVR1275",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1255] = {
		.name		= "Hauppauge WinTV-HVR1255",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1210] = {
		.name		= "Hauppauge WinTV-HVR1210",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_MYGICA_X8506] = {
		.name		= "Mygica X8506 DMB-TH",
		.tuner_type = TUNER_XC5000,
		.tuner_addr = 0x61,
		.tuner_bus	= 1,
		.porta		= CX23885_ANALOG_VIDEO,
		.portb		= CX23885_MPEG_DVB,
		.input		= {
			{
				.type   = CX23885_VMUX_TELEVISION,
				.vmux   = CX25840_COMPOSITE2,
			},
			{
				.type   = CX23885_VMUX_COMPOSITE1,
				.vmux   = CX25840_COMPOSITE8,
			},
			{
				.type   = CX23885_VMUX_SVIDEO,
				.vmux   = CX25840_SVIDEO_LUMA3 |
						CX25840_SVIDEO_CHROMA4,
			},
			{
				.type   = CX23885_VMUX_COMPONENT,
				.vmux   = CX25840_COMPONENT_ON |
					CX25840_VIN1_CH1 |
					CX25840_VIN6_CH2 |
					CX25840_VIN7_CH3,
			},
		},
	},
	[CX23885_BOARD_MAGICPRO_PROHDTVE2] = {
		.name		= "Magic-Pro ProHDTV Extreme 2",
		.tuner_type = TUNER_XC5000,
		.tuner_addr = 0x61,
		.tuner_bus	= 1,
		.porta		= CX23885_ANALOG_VIDEO,
		.portb		= CX23885_MPEG_DVB,
		.input		= {
			{
				.type   = CX23885_VMUX_TELEVISION,
				.vmux   = CX25840_COMPOSITE2,
			},
			{
				.type   = CX23885_VMUX_COMPOSITE1,
				.vmux   = CX25840_COMPOSITE8,
			},
			{
				.type   = CX23885_VMUX_SVIDEO,
				.vmux   = CX25840_SVIDEO_LUMA3 |
						CX25840_SVIDEO_CHROMA4,
			},
			{
				.type   = CX23885_VMUX_COMPONENT,
				.vmux   = CX25840_COMPONENT_ON |
					CX25840_VIN1_CH1 |
					CX25840_VIN6_CH2 |
					CX25840_VIN7_CH3,
			},
		},
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1850] = {
		.name		= "Hauppauge WinTV-HVR1850",
		.porta		= CX23885_ANALOG_VIDEO,
		.portb		= CX23885_MPEG_ENCODER,
		.portc		= CX23885_MPEG_DVB,
		.tuner_type	= TUNER_ABSENT,
		.tuner_addr	= 0x42, /* 0x84 >> 1 */
		.force_bff	= 1,
		.input          = {{
			.type   = CX23885_VMUX_TELEVISION,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN5_CH2 |
					CX25840_VIN2_CH1 |
					CX25840_DIF_ON,
			.amux   = CX25840_AUDIO8,
		}, {
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN4_CH2 |
					CX25840_VIN6_CH1,
			.amux   = CX25840_AUDIO7,
		}, {
			.type   = CX23885_VMUX_SVIDEO,
			.vmux   =	CX25840_VIN7_CH3 |
					CX25840_VIN4_CH2 |
					CX25840_VIN8_CH1 |
					CX25840_SVIDEO_ON,
			.amux   = CX25840_AUDIO7,
		} },
	},
	[CX23885_BOARD_COMPRO_VIDEOMATE_E800] = {
		.name		= "Compro VideoMate E800",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_HAUPPAUGE_HVR1290] = {
		.name		= "Hauppauge WinTV-HVR1290",
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_MYGICA_X8558PRO] = {
		.name		= "Mygica X8558 PRO DMB-TH",
		.portb		= CX23885_MPEG_DVB,
		.portc		= CX23885_MPEG_DVB,
	},
	[CX23885_BOARD_LEADTEK_WINFAST_PXTV1200] = {
		.name		= "LEADTEK WinFast PxTV1200",
		.porta		= CX23885_ANALOG_VIDEO,
		.tuner_type	= TUNER_XC2028,
		.tuner_addr	= 0x61,
		.tuner_bus	= 1,
		.input		= {{
			.type	= CX23885_VMUX_TELEVISION,
			.vmux	= CX25840_VIN2_CH1 |
				  CX25840_VIN5_CH2 |
				  CX25840_NONE0_CH3,
		}, {
			.type	= CX23885_VMUX_COMPOSITE1,
			.vmux	= CX25840_COMPOSITE1,
		}, {
			.type	= CX23885_VMUX_SVIDEO,
			.vmux	= CX25840_SVIDEO_LUMA3 |
				  CX25840_SVIDEO_CHROMA4,
		}, {
			.type	= CX23885_VMUX_COMPONENT,
			.vmux	= CX25840_VIN7_CH1 |
				  CX25840_VIN6_CH2 |
				  CX25840_VIN8_CH3 |
				  CX25840_COMPONENT_ON,
		} },
	},
	[CX23885_BOARD_GOTVIEW_X5_3D_HYBRID] = {
		.name		= "GoTView X5 3D Hybrid",
		.tuner_type	= TUNER_XC5000,
		.tuner_addr	= 0x64,
		.tuner_bus	= 1,
		.porta		= CX23885_ANALOG_VIDEO,
		.portb		= CX23885_MPEG_DVB,
		.input          = {{
			.type   = CX23885_VMUX_TELEVISION,
			.vmux   = CX25840_VIN2_CH1 |
				  CX25840_VIN5_CH2,
			.gpio0  = 0x02,
		}, {
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   = CX23885_VMUX_COMPOSITE1,
		}, {
			.type   = CX23885_VMUX_SVIDEO,
			.vmux   = CX25840_SVIDEO_LUMA3 |
				  CX25840_SVIDEO_CHROMA4,
		} },
	},
	[CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF] = {
		.ci_type	= 2,
		.name		= "NetUP Dual DVB-T/C-CI RF",
		.porta		= CX23885_ANALOG_VIDEO,
		.portb		= CX23885_MPEG_DVB,
		.portc		= CX23885_MPEG_DVB,
		.num_fds_portb	= 2,
		.num_fds_portc	= 2,
		.tuner_type	= TUNER_XC5000,
		.tuner_addr	= 0x64,
		.input          = { {
				.type   = CX23885_VMUX_TELEVISION,
				.vmux   = CX25840_COMPOSITE1,
		} },
	},
	[CX23885_BOARD_MPX885] = {
		.name		= "MPX-885",
		.porta		= CX23885_ANALOG_VIDEO,
		.input          = {{
			.type   = CX23885_VMUX_COMPOSITE1,
			.vmux   = CX25840_COMPOSITE1,
			.amux   = CX25840_AUDIO6,
			.gpio0  = 0,
		}, {
			.type   = CX23885_VMUX_COMPOSITE2,
			.vmux   = CX25840_COMPOSITE2,
			.amux   = CX25840_AUDIO6,
			.gpio0  = 0,
		}, {
			.type   = CX23885_VMUX_COMPOSITE3,
			.vmux   = CX25840_COMPOSITE3,
			.amux   = CX25840_AUDIO7,
			.gpio0  = 0,
		}, {
			.type   = CX23885_VMUX_COMPOSITE4,
			.vmux   = CX25840_COMPOSITE4,
			.amux   = CX25840_AUDIO7,
			.gpio0  = 0,
		} },
	},
	[CX23885_BOARD_MYGICA_X8507] = {
		.name		= "Mygica X8507",
		.tuner_type = TUNER_XC5000,
		.tuner_addr = 0x61,
		.tuner_bus	= 1,
		.porta		= CX23885_ANALOG_VIDEO,
		.input		= {
			{
				.type   = CX23885_VMUX_TELEVISION,
				.vmux   = CX25840_COMPOSITE2,
				.amux	= CX25840_AUDIO8,
			},
			{
				.type   = CX23885_VMUX_COMPOSITE1,
				.vmux   = CX25840_COMPOSITE8,
			},
			{
				.type   = CX23885_VMUX_SVIDEO,
				.vmux   = CX25840_SVIDEO_LUMA3 |
						CX25840_SVIDEO_CHROMA4,
			},
			{
				.type   = CX23885_VMUX_COMPONENT,
				.vmux   = CX25840_COMPONENT_ON |
					CX25840_VIN1_CH1 |
					CX25840_VIN6_CH2 |
					CX25840_VIN7_CH3,
			},
		},
	},
	[CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL] = {
		.name		= "TerraTec Cinergy T PCIe Dual",
		.portb		= CX23885_MPEG_DVB,
		.portc		= CX23885_MPEG_DVB,
	}
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
/* ------------------------------------------------------------------ */
/* PCI subsystem IDs */
/*
 * PCI subsystem-ID -> board mapping.  At probe time the driver matches
 * the card's subsystem vendor/device pair against this table to pick the
 * cx23885_boards[] entry; unmatched cards fall back to
 * CX23885_BOARD_UNKNOWN and the card=<n> module option.
 */
struct cx23885_subid cx23885_subids[] = {
	{
		.subvendor = 0x0070,
		.subdevice = 0x3400,
		.card      = CX23885_BOARD_UNKNOWN,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7600,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800lp,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7800,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7801,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7809,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1800,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7911,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1250,
	}, {
		.subvendor = 0x18ac,
		.subdevice = 0xd500,
		.card      = CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7790,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500Q,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7797,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500Q,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7710,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x7717,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1500,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x71d1,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1200,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x71d3,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1200,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x8101,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1700,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x8010,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1400,
	}, {
		.subvendor = 0x18ac,
		.subdevice = 0xd618,
		.card      = CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP,
	}, {
		.subvendor = 0x18ac,
		.subdevice = 0xdb78,
		.card      = CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP,
	}, {
		.subvendor = 0x107d,
		.subdevice = 0x6681,
		.card      = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
	}, {
		.subvendor = 0x107d,
		.subdevice = 0x6f39,
		.card	   = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000,
	}, {
		.subvendor = 0x185b,
		.subdevice = 0xe800,
		.card      = CX23885_BOARD_COMPRO_VIDEOMATE_E650F,
	}, {
		.subvendor = 0x6920,
		.subdevice = 0x8888,
		.card      = CX23885_BOARD_TBS_6920,
	}, {
		.subvendor = 0xd470,
		.subdevice = 0x9022,
		.card      = CX23885_BOARD_TEVII_S470,
	}, {
		.subvendor = 0x0001,
		.subdevice = 0x2005,
		.card      = CX23885_BOARD_DVBWORLD_2005,
	}, {
		.subvendor = 0x1b55,
		.subdevice = 0x2a2c,
		.card      = CX23885_BOARD_NETUP_DUAL_DVBS2_CI,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x2211,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1270,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x2215,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1275,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x221d,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1275,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x2251,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1255,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x2259,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1255,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x2291,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x2295,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x2299,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x229d,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210, /* HVR1215 */
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x22f0,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x22f1,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1255,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x22f2,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1275,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x22f3,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210, /* HVR1215 */
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x22f4,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x22f5,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1210, /* HVR1215 */
	}, {
		.subvendor = 0x14f1,
		.subdevice = 0x8651,
		.card      = CX23885_BOARD_MYGICA_X8506,
	}, {
		.subvendor = 0x14f1,
		.subdevice = 0x8657,
		.card      = CX23885_BOARD_MAGICPRO_PROHDTVE2,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x8541,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1850,
	}, {
		.subvendor = 0x1858,
		.subdevice = 0xe800,
		.card      = CX23885_BOARD_COMPRO_VIDEOMATE_E800,
	}, {
		.subvendor = 0x0070,
		.subdevice = 0x8551,
		.card      = CX23885_BOARD_HAUPPAUGE_HVR1290,
	}, {
		.subvendor = 0x14f1,
		.subdevice = 0x8578,
		.card      = CX23885_BOARD_MYGICA_X8558PRO,
	}, {
		.subvendor = 0x107d,
		.subdevice = 0x6f22,
		.card      = CX23885_BOARD_LEADTEK_WINFAST_PXTV1200,
	}, {
		.subvendor = 0x5654,
		.subdevice = 0x2390,
		.card      = CX23885_BOARD_GOTVIEW_X5_3D_HYBRID,
	}, {
		.subvendor = 0x1b55,
		.subdevice = 0xe2e4,
		.card      = CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF,
	}, {
		.subvendor = 0x14f1,
		.subdevice = 0x8502,
		.card      = CX23885_BOARD_MYGICA_X8507,
	}, {
		.subvendor = 0x153b,
		.subdevice = 0x117e,
		.card      = CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL,
	},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
/*
 * Print diagnostic guidance when a board could not be autodetected
 * (either the PCIe subsystem ID is 0:0 or it is simply unknown), then
 * list every valid card=<n> module-option value.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * cx23885_bcount, triggering -Wsign-compare; use an unsigned index to
 * match the bound's type.
 */
void cx23885_card_list(struct cx23885_dev *dev)
{
	unsigned int i;

	if (0 == dev->pci->subsystem_vendor &&
	    0 == dev->pci->subsystem_device) {
		/* ID of 0:0 can never match the subids table. */
		printk(KERN_INFO
			"%s: Board has no valid PCIe Subsystem ID and can't\n"
		       "%s: be autodetected. Pass card=<n> insmod option\n"
		       "%s: to workaround that. Redirect complaints to the\n"
		       "%s: vendor of the TV card.  Best regards,\n"
		       "%s:         -- tux\n",
		       dev->name, dev->name, dev->name, dev->name, dev->name);
	} else {
		printk(KERN_INFO
			"%s: Your board isn't known (yet) to the driver.\n"
		       "%s: Try to pick one of the existing card configs via\n"
		       "%s: card=<n> insmod option.  Updating to the latest\n"
		       "%s: version might help as well.\n",
		       dev->name, dev->name, dev->name, dev->name);
	}
	printk(KERN_INFO "%s: Here is a list of valid choices for the card=<n> insmod option:\n",
	       dev->name);
	for (i = 0; i < cx23885_bcount; i++)
		printk(KERN_INFO "%s:    card=%d -> %s\n",
		       dev->name, i, cx23885_boards[i].name);
}
/*
 * Parse the Hauppauge EEPROM blob via tveeprom and sanity-check the
 * model number against the list of Hauppauge designs this driver knows.
 * Unknown models only trigger a warning -- the board keeps whatever
 * configuration was selected by subsystem ID or card=<n>.  The parsed
 * model number is always logged at the end.
 */
static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
{
	struct tveeprom tv;

	tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv,
		eeprom_data);

	/* Make sure we support the board model */
	switch (tv.model) {
	case 22001:
		/* WinTV-HVR1270 (PCIe, Retail, half height)
		 * ATSC/QAM and basic analog, IR Blast */
	case 22009:
		/* WinTV-HVR1210 (PCIe, Retail, half height)
		 * DVB-T and basic analog, IR Blast */
	case 22011:
		/* WinTV-HVR1270 (PCIe, Retail, half height)
		 * ATSC/QAM and basic analog, IR Recv */
	case 22019:
		/* WinTV-HVR1210 (PCIe, Retail, half height)
		 * DVB-T and basic analog, IR Recv */
	case 22021:
		/* WinTV-HVR1275 (PCIe, Retail, half height)
		 * ATSC/QAM and basic analog, IR Recv */
	case 22029:
		/* WinTV-HVR1210 (PCIe, Retail, half height)
		 * DVB-T and basic analog, IR Recv */
	case 22101:
		/* WinTV-HVR1270 (PCIe, Retail, full height)
		 * ATSC/QAM and basic analog, IR Blast */
	case 22109:
		/* WinTV-HVR1210 (PCIe, Retail, full height)
		 * DVB-T and basic analog, IR Blast */
	case 22111:
		/* WinTV-HVR1270 (PCIe, Retail, full height)
		 * ATSC/QAM and basic analog, IR Recv */
	case 22119:
		/* WinTV-HVR1210 (PCIe, Retail, full height)
		 * DVB-T and basic analog, IR Recv */
	case 22121:
		/* WinTV-HVR1275 (PCIe, Retail, full height)
		 * ATSC/QAM and basic analog, IR Recv */
	case 22129:
		/* WinTV-HVR1210 (PCIe, Retail, full height)
		 * DVB-T and basic analog, IR Recv */
	case 71009:
		/* WinTV-HVR1200 (PCIe, Retail, full height)
		 * DVB-T and basic analog */
	case 71359:
		/* WinTV-HVR1200 (PCIe, OEM, half height)
		 * DVB-T and basic analog */
	case 71439:
		/* WinTV-HVR1200 (PCIe, OEM, half height)
		 * DVB-T and basic analog */
	case 71449:
		/* WinTV-HVR1200 (PCIe, OEM, full height)
		 * DVB-T and basic analog */
	case 71939:
		/* WinTV-HVR1200 (PCIe, OEM, half height)
		 * DVB-T and basic analog */
	case 71949:
		/* WinTV-HVR1200 (PCIe, OEM, full height)
		 * DVB-T and basic analog */
	case 71959:
		/* WinTV-HVR1200 (PCIe, OEM, full height)
		 * DVB-T and basic analog */
	case 71979:
		/* WinTV-HVR1200 (PCIe, OEM, half height)
		 * DVB-T and basic analog */
	case 71999:
		/* WinTV-HVR1200 (PCIe, OEM, full height)
		 * DVB-T and basic analog */
	case 76601:
		/* WinTV-HVR1800lp (PCIe, Retail, No IR, Dual
			channel ATSC and MPEG2 HW Encoder */
	case 77001:
		/* WinTV-HVR1500 (Express Card, OEM, No IR, ATSC
			and Basic analog */
	case 77011:
		/* WinTV-HVR1500 (Express Card, Retail, No IR, ATSC
			and Basic analog */
	case 77041:
		/* WinTV-HVR1500Q (Express Card, OEM, No IR, ATSC/QAM
			and Basic analog */
	case 77051:
		/* WinTV-HVR1500Q (Express Card, Retail, No IR, ATSC/QAM
			and Basic analog */
	case 78011:
		/* WinTV-HVR1800 (PCIe, Retail, 3.5mm in, IR, No FM,
			Dual channel ATSC and MPEG2 HW Encoder */
	case 78501:
		/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM,
			Dual channel ATSC and MPEG2 HW Encoder */
	case 78521:
		/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM,
			Dual channel ATSC and MPEG2 HW Encoder */
	case 78531:
		/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, No FM,
			Dual channel ATSC and MPEG2 HW Encoder */
	case 78631:
		/* WinTV-HVR1800 (PCIe, OEM, No IR, No FM,
			Dual channel ATSC and MPEG2 HW Encoder */
	case 79001:
		/* WinTV-HVR1250 (PCIe, Retail, IR, full height,
			ATSC and Basic analog */
	case 79101:
		/* WinTV-HVR1250 (PCIe, Retail, IR, half height,
			ATSC and Basic analog */
	case 79501:
		/* WinTV-HVR1250 (PCIe, No IR, half height,
			ATSC [at least] and Basic analog) */
	case 79561:
		/* WinTV-HVR1250 (PCIe, OEM, No IR, half height,
			ATSC and Basic analog */
	case 79571:
		/* WinTV-HVR1250 (PCIe, OEM, No IR, full height,
			ATSC and Basic analog */
	case 79671:
		/* WinTV-HVR1250 (PCIe, OEM, No IR, half height,
			ATSC and Basic analog */
	case 80019:
		/* WinTV-HVR1400 (Express Card, Retail, IR,
		 * DVB-T and Basic analog */
	case 81509:
		/* WinTV-HVR1700 (PCIe, OEM, No IR, half height)
		 * DVB-T and MPEG2 HW Encoder */
	case 81519:
		/* WinTV-HVR1700 (PCIe, OEM, No IR, full height)
		 * DVB-T and MPEG2 HW Encoder */
		break;
	case 85021:
		/* WinTV-HVR1850 (PCIe, Retail, 3.5mm in, IR, FM,
			Dual channel ATSC and MPEG2 HW Encoder */
		break;
	case 85721:
		/* WinTV-HVR1290 (PCIe, OEM, RCA in, IR,
			Dual channel ATSC and Basic analog */
		break;
	default:
		printk(KERN_WARNING "%s: warning: "
			"unknown hauppauge model #%d\n",
			dev->name, tv.model);
		break;
	}

	printk(KERN_INFO "%s: hauppauge eeprom: model=%d\n",
			dev->name, tv.model);
}
/*
 * Tuner-reset callback handed to the tuner drivers (xc2028/xc4000/xc5000).
 *
 * Only command 0 ("reset the tuner") is implemented.  XC2028_RESET_CLK
 * requests are acknowledged without doing anything; any other command is
 * rejected with -EINVAL.  The board-specific switch selects which GP0_IO
 * bit(s) wiggle the tuner's reset line; the NetUP board instead delegates
 * to its Altera CI glue.  Returns 0 on success.
 */
int cx23885_tuner_callback(void *priv, int component, int command, int arg)
{
	struct cx23885_tsport *port = priv;
	struct cx23885_dev *dev = port->dev;
	u32 reset_mask = 0;

	/* Clock-reset requests from xc2028 need no action here. */
	if (command == XC2028_RESET_CLK)
		return 0;

	/* Everything else besides a plain tuner reset is unsupported. */
	if (command != 0) {
		printk(KERN_ERR "%s(): Unknown command 0x%x.\n",
			__func__, command);
		return -EINVAL;
	}

	switch (dev->board) {
	case CX23885_BOARD_HAUPPAUGE_HVR1400:
	case CX23885_BOARD_HAUPPAUGE_HVR1500:
	case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
	case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
		/* Single tuner: reset line on GPIO-2. */
		reset_mask = 0x04;
		break;
	case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
	case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
		/* Two identical tuners on two different i2c buses,
		 * we need to reset the correct gpio. */
		if (port->nr == 1)
			reset_mask = 0x01;
		else if (port->nr == 2)
			reset_mask = 0x04;
		break;
	case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
		/* Reset line on GPIO-1. */
		reset_mask = 0x02;
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		/* Reset is routed through the Altera CI FPGA. */
		altera_ci_tuner_reset(dev, port->nr);
		break;
	}

	if (!reset_mask)
		return 0;

	/* Drive the tuner into reset and back out */
	cx_clear(GP0_IO, reset_mask);
	mdelay(200);
	cx_set(GP0_IO, reset_mask);
	return 0;
}
/*
 * One-time, board-specific GPIO initialisation run at card setup.
 * Each case documents its board's GPIO wiring, then performs the reset
 * sequencing (typically: assert reset, delay, deassert) for the demods,
 * tuners and CI bridges hanging off the CX23885 GPIO pins.  The exact
 * ordering and delays below are hardware-mandated -- do not reorder.
 */
void cx23885_gpio_setup(struct cx23885_dev *dev)
{
	switch (dev->board) {
	case CX23885_BOARD_HAUPPAUGE_HVR1250:
		/* GPIO-0 cx24227 demodulator reset */
		cx_set(GP0_IO, 0x00010001); /* Bring the part out of reset */
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1500:
		/* GPIO-0  cx24227 demodulator */
		/* GPIO-2  xc3028 tuner */

		/* Put the parts into reset */
		cx_set(GP0_IO, 0x00050000);
		cx_clear(GP0_IO, 0x00000005);
		msleep(5);

		/* Bring the parts out of reset */
		cx_set(GP0_IO, 0x00050005);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
		/* GPIO-0 cx24227 demodulator reset */
		/* GPIO-2 xc5000 tuner reset */
		cx_set(GP0_IO, 0x00050005); /* Bring the part out of reset */
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1800:
		/* GPIO-0 656_CLK */
		/* GPIO-1 656_D0 */
		/* GPIO-2 8295A Reset */
		/* GPIO-3-10 cx23417 data0-7 */
		/* GPIO-11-14 cx23417 addr0-3 */
		/* GPIO-15-18 cx23417 READY, CS, RD, WR */
		/* GPIO-19 IR_RX */

		/* CX23417 GPIO's */
		/* EIO15 Zilog Reset */
		/* EIO14 S5H1409/CX24227 Reset */
		mc417_gpio_enable(dev, GPIO_15 | GPIO_14, 1);

		/* Put the demod into reset and protect the eeprom */
		mc417_gpio_clear(dev, GPIO_15 | GPIO_14);
		mdelay(100);

		/* Bring the demod and blaster out of reset */
		mc417_gpio_set(dev, GPIO_15 | GPIO_14);
		mdelay(100);

		/* Force the TDA8295A into reset and back */
		cx23885_gpio_enable(dev, GPIO_2, 1);
		cx23885_gpio_set(dev, GPIO_2);
		mdelay(20);
		cx23885_gpio_clear(dev, GPIO_2);
		mdelay(20);
		cx23885_gpio_set(dev, GPIO_2);
		mdelay(20);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1200:
		/* GPIO-0 tda10048 demodulator reset */
		/* GPIO-2 tda18271 tuner reset */

		/* Put the parts into reset and back */
		cx_set(GP0_IO, 0x00050000);
		mdelay(20);
		cx_clear(GP0_IO, 0x00000005);
		mdelay(20);
		cx_set(GP0_IO, 0x00050005);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1700:
		/* GPIO-0 TDA10048 demodulator reset */
		/* GPIO-2 TDA8295A Reset */
		/* GPIO-3-10 cx23417 data0-7 */
		/* GPIO-11-14 cx23417 addr0-3 */
		/* GPIO-15-18 cx23417 READY, CS, RD, WR */

		/* The following GPIO's are on the interna AVCore (cx25840) */
		/* GPIO-19 IR_RX */
		/* GPIO-20 IR_TX 416/DVBT Select */
		/* GPIO-21 IIS DAT */
		/* GPIO-22 IIS WCLK */
		/* GPIO-23 IIS BCLK */

		/* Put the parts into reset and back */
		cx_set(GP0_IO, 0x00050000);
		mdelay(20);
		cx_clear(GP0_IO, 0x00000005);
		mdelay(20);
		cx_set(GP0_IO, 0x00050005);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1400:
		/* GPIO-0  Dibcom7000p demodulator reset */
		/* GPIO-2  xc3028L tuner reset */
		/* GPIO-13 LED */

		/* Put the parts into reset and back */
		cx_set(GP0_IO, 0x00050000);
		mdelay(20);
		cx_clear(GP0_IO, 0x00000005);
		mdelay(20);
		cx_set(GP0_IO, 0x00050005);
		break;
	case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
		/* GPIO-0 xc5000 tuner reset i2c bus 0 */
		/* GPIO-1 s5h1409 demod reset i2c bus 0 */
		/* GPIO-2 xc5000 tuner reset i2c bus 1 */
		/* GPIO-3 s5h1409 demod reset i2c bus 0 */

		/* Put the parts into reset and back */
		cx_set(GP0_IO, 0x000f0000);
		mdelay(20);
		cx_clear(GP0_IO, 0x0000000f);
		mdelay(20);
		cx_set(GP0_IO, 0x000f000f);
		break;
	case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
		/* GPIO-0 portb xc3028 reset */
		/* GPIO-1 portb zl10353 reset */
		/* GPIO-2 portc xc3028 reset */
		/* GPIO-3 portc zl10353 reset */

		/* Put the parts into reset and back */
		cx_set(GP0_IO, 0x000f0000);
		mdelay(20);
		cx_clear(GP0_IO, 0x0000000f);
		mdelay(20);
		cx_set(GP0_IO, 0x000f000f);
		break;
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
	case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
		/* GPIO-2  xc3028 tuner reset */

		/* The following GPIO's are on the internal AVCore (cx25840) */
		/* GPIO-? zl10353 demod reset */

		/* Put the parts into reset and back */
		cx_set(GP0_IO, 0x00040000);
		mdelay(20);
		cx_clear(GP0_IO, 0x00000004);
		mdelay(20);
		cx_set(GP0_IO, 0x00040004);
		break;
	case CX23885_BOARD_TBS_6920:
		cx_write(MC417_CTL, 0x00000036);
		cx_write(MC417_OEN, 0x00001000);
		cx_set(MC417_RWD, 0x00000002);
		mdelay(200);
		cx_clear(MC417_RWD, 0x00000800);
		mdelay(200);
		cx_set(MC417_RWD, 0x00000800);
		mdelay(200);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		/* GPIO-0 INTA from CiMax1
		   GPIO-1 INTB from CiMax2
		   GPIO-2 reset chips
		   GPIO-3 to GPIO-10 data/addr for CA
		   GPIO-11 ~CS0 to CiMax1
		   GPIO-12 ~CS1 to CiMax2
		   GPIO-13 ADL0 load LSB addr
		   GPIO-14 ADL1 load MSB addr
		   GPIO-15 ~RDY from CiMax
		   GPIO-17 ~RD to CiMax
		   GPIO-18 ~WR to CiMax
		 */
		cx_set(GP0_IO, 0x00040000); /* GPIO as out */
		/* GPIO1 and GPIO2 as INTA and INTB from CiMaxes, reset low */
		cx_clear(GP0_IO, 0x00030004);
		mdelay(100);/* reset delay */
		cx_set(GP0_IO, 0x00040004); /* GPIO as out, reset high */
		cx_write(MC417_CTL, 0x00000037);/* enable GPIO3-18 pins */
		/* GPIO-15 IN as ~ACK, rest as OUT */
		cx_write(MC417_OEN, 0x00001000);
		/* ~RD, ~WR high; ADL0, ADL1 low; ~CS0, ~CS1 high */
		cx_write(MC417_RWD, 0x0000c300);
		/* enable irq */
		cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1270:
	case CX23885_BOARD_HAUPPAUGE_HVR1275:
	case CX23885_BOARD_HAUPPAUGE_HVR1255:
	case CX23885_BOARD_HAUPPAUGE_HVR1210:
		/* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
		/* GPIO-6 I2C Gate which can isolate the demod from the bus */
		/* GPIO-9 Demod reset */

		/* Put the parts into reset and back */
		cx23885_gpio_enable(dev, GPIO_9 | GPIO_6 | GPIO_5, 1);
		cx23885_gpio_set(dev, GPIO_9 | GPIO_6 | GPIO_5);
		cx23885_gpio_clear(dev, GPIO_9);
		mdelay(20);
		cx23885_gpio_set(dev, GPIO_9);
		break;
	case CX23885_BOARD_MYGICA_X8506:
	case CX23885_BOARD_MAGICPRO_PROHDTVE2:
	case CX23885_BOARD_MYGICA_X8507:
		/* GPIO-0 (0)Analog / (1)Digital TV */
		/* GPIO-1 reset XC5000 */
		/* GPIO-2 reset LGS8GL5 / LGS8G75 */
		cx23885_gpio_enable(dev, GPIO_0 | GPIO_1 | GPIO_2, 1);
		cx23885_gpio_clear(dev, GPIO_1 | GPIO_2);
		mdelay(100);
		cx23885_gpio_set(dev, GPIO_0 | GPIO_1 | GPIO_2);
		mdelay(100);
		break;
	case CX23885_BOARD_MYGICA_X8558PRO:
		/* GPIO-0 reset first ATBM8830 */
		/* GPIO-1 reset second ATBM8830 */
		cx23885_gpio_enable(dev, GPIO_0 | GPIO_1, 1);
		cx23885_gpio_clear(dev, GPIO_0 | GPIO_1);
		mdelay(100);
		cx23885_gpio_set(dev, GPIO_0 | GPIO_1);
		mdelay(100);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1850:
	case CX23885_BOARD_HAUPPAUGE_HVR1290:
		/* GPIO-0 656_CLK */
		/* GPIO-1 656_D0 */
		/* GPIO-2 Wake# */
		/* GPIO-3-10 cx23417 data0-7 */
		/* GPIO-11-14 cx23417 addr0-3 */
		/* GPIO-15-18 cx23417 READY, CS, RD, WR */
		/* GPIO-19 IR_RX */
		/* GPIO-20 C_IR_TX */
		/* GPIO-21 I2S DAT */
		/* GPIO-22 I2S WCLK */
		/* GPIO-23 I2S BCLK */
		/* ALT GPIO: EXP GPIO LATCH */

		/* CX23417 GPIO's */
		/* GPIO-14 S5H1411/CX24228 Reset */
		/* GPIO-13 EEPROM write protect */
		mc417_gpio_enable(dev, GPIO_14 | GPIO_13, 1);

		/* Put the demod into reset and protect the eeprom */
		mc417_gpio_clear(dev, GPIO_14 | GPIO_13);
		mdelay(100);

		/* Bring the demod out of reset */
		mc417_gpio_set(dev, GPIO_14);
		mdelay(100);

		/* CX24228 GPIO */
		/* Connected to IF / Mux */
		break;
	case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
		cx_set(GP0_IO, 0x00010001); /* Bring the part out of reset */
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		/* GPIO-0 ~INT in
		   GPIO-1 TMS out
		   GPIO-2 ~reset chips out
		   GPIO-3 to GPIO-10 data/addr for CA in/out
		   GPIO-11 ~CS out
		   GPIO-12 ADDR out
		   GPIO-13 ~WR out
		   GPIO-14 ~RD out
		   GPIO-15 ~RDY in
		   GPIO-16 TCK out
		   GPIO-17 TDO in
		   GPIO-18 TDI out
		 */
		cx_set(GP0_IO, 0x00060000); /* GPIO-1,2 as out */
		/* GPIO-0 as INT, reset & TMS low */
		cx_clear(GP0_IO, 0x00010006);
		mdelay(100);/* reset delay */
		cx_set(GP0_IO, 0x00000004); /* reset high */
		cx_write(MC417_CTL, 0x00000037);/* enable GPIO-3..18 pins */
		/* GPIO-17 is TDO in, GPIO-15 is ~RDY in, rest is out */
		cx_write(MC417_OEN, 0x00005000);
		/* ~RD, ~WR high; ADDR low; ~CS high */
		cx_write(MC417_RWD, 0x00000d00);
		/* enable irq */
		cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
		break;
	}
}
/*
 * cx23885_ir_init() - attach and configure IR hardware for a board.
 *
 * Depending on the board this probes the integrated CX23888 IR
 * controller, reuses the A/V core as the IR controller, or just loads
 * the external ir-kbd-i2c module.  Pin configurations route GPIO19/20
 * to the IR RX/TX pads of the cx25840 core.
 *
 * Returns 0 on success or a negative errno from the underlying probe.
 *
 * Bug fix: the two tx_g/s_parameters calls passed a corrupted token
 * ("¶ms", an HTML-entity mangling of "&params"); restored to take the
 * address of the local params structure.
 */
int cx23885_ir_init(struct cx23885_dev *dev)
{
	static struct v4l2_subdev_io_pin_config ir_rxtx_pin_cfg[] = {
		{
			.flags = V4L2_SUBDEV_IO_PIN_INPUT,
			.pin = CX23885_PIN_IR_RX_GPIO19,
			.function = CX23885_PAD_IR_RX,
			.value = 0,
			.strength = CX25840_PIN_DRIVE_MEDIUM,
		}, {
			.flags = V4L2_SUBDEV_IO_PIN_OUTPUT,
			.pin = CX23885_PIN_IR_TX_GPIO20,
			.function = CX23885_PAD_IR_TX,
			.value = 0,
			.strength = CX25840_PIN_DRIVE_MEDIUM,
		}
	};
	const size_t ir_rxtx_pin_cfg_count = ARRAY_SIZE(ir_rxtx_pin_cfg);

	static struct v4l2_subdev_io_pin_config ir_rx_pin_cfg[] = {
		{
			.flags = V4L2_SUBDEV_IO_PIN_INPUT,
			.pin = CX23885_PIN_IR_RX_GPIO19,
			.function = CX23885_PAD_IR_RX,
			.value = 0,
			.strength = CX25840_PIN_DRIVE_MEDIUM,
		}
	};
	const size_t ir_rx_pin_cfg_count = ARRAY_SIZE(ir_rx_pin_cfg);

	struct v4l2_subdev_ir_parameters params;
	int ret = 0;

	switch (dev->board) {
	case CX23885_BOARD_HAUPPAUGE_HVR1500:
	case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
	case CX23885_BOARD_HAUPPAUGE_HVR1800:
	case CX23885_BOARD_HAUPPAUGE_HVR1200:
	case CX23885_BOARD_HAUPPAUGE_HVR1400:
	case CX23885_BOARD_HAUPPAUGE_HVR1275:
	case CX23885_BOARD_HAUPPAUGE_HVR1255:
	case CX23885_BOARD_HAUPPAUGE_HVR1210:
		/* FIXME: Implement me */
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1270:
		/* Integrated CX23888 IR controller, RX only */
		ret = cx23888_ir_probe(dev);
		if (ret)
			break;
		dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
		v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
				 ir_rx_pin_cfg_count, ir_rx_pin_cfg);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1850:
	case CX23885_BOARD_HAUPPAUGE_HVR1290:
		/* Integrated CX23888 IR controller, RX and TX */
		ret = cx23888_ir_probe(dev);
		if (ret)
			break;
		dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
		v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
				 ir_rxtx_pin_cfg_count, ir_rxtx_pin_cfg);
		/*
		 * For these boards we need to invert the Tx output via the
		 * IR controller to have the LED off while idle
		 */
		v4l2_subdev_call(dev->sd_ir, ir, tx_g_parameters, &params);
		params.enable = false;
		params.shutdown = false;
		params.invert_level = true;
		v4l2_subdev_call(dev->sd_ir, ir, tx_s_parameters, &params);
		params.shutdown = true;
		v4l2_subdev_call(dev->sd_ir, ir, tx_s_parameters, &params);
		break;
	case CX23885_BOARD_TEVII_S470:
		if (!enable_885_ir)
			break;
		/* The AV core doubles as the IR controller on this board */
		dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
		if (dev->sd_ir == NULL) {
			ret = -ENODEV;
			break;
		}
		v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
				 ir_rx_pin_cfg_count, ir_rx_pin_cfg);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1250:
		if (!enable_885_ir)
			break;
		dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
		if (dev->sd_ir == NULL) {
			ret = -ENODEV;
			break;
		}
		v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
				 ir_rxtx_pin_cfg_count, ir_rxtx_pin_cfg);
		break;
	case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
		request_module("ir-kbd-i2c");
		break;
	}

	return ret;
}
/*
 * Tear down IR support set up by cx23885_ir_init().  Boards with the
 * integrated CX23888 IR controller get their IRQ mask removed and the
 * sub-device unregistered; AV-core based boards only need the mask
 * removed and the alias pointer cleared.
 */
void cx23885_ir_fini(struct cx23885_dev *dev)
{
	if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1270 ||
	    dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850 ||
	    dev->board == CX23885_BOARD_HAUPPAUGE_HVR1290) {
		cx23885_irq_remove(dev, PCI_MSK_IR);
		cx23888_ir_remove(dev);
		dev->sd_ir = NULL;
	} else if (dev->board == CX23885_BOARD_TEVII_S470 ||
		   dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) {
		cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
		/* sd_ir is a duplicate pointer to the AV Core, just clear it */
		dev->sd_ir = NULL;
	}
}
/*
 * netup_jtag_io() - bit-bang one JTAG cycle through CX23885 GPIOs,
 * used as the jtag_io callback for Altera FPGA configuration (see the
 * altera_config setup in cx23885_card_setup()).
 *
 * TMS is driven on GP0_IO bit 1 (bit 17 presumably being its output
 * enable — confirm against the GP0_IO register layout); TDI is bit 15
 * and TCK bit 13 of MC417_RWD.  TDO (bit 14) is sampled from the
 * MC417_RWD value read before TCK is pulsed.  Returns the sampled TDO
 * bit (0/1) when read_tdo is set, otherwise 0.
 */
int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
{
	int data;
	int tdo = 0;
	struct cx23885_dev *dev = (struct cx23885_dev *)device;

	/* TMS: clear bit 1, then drive it per tms with bit 17 set */
	data = ((cx_read(GP0_IO)) & (~0x00000002));
	data |= (tms ? 0x00020002 : 0x00020000);
	cx_write(GP0_IO, data);

	/* TDI: clear bits 15 (TDI) and 13 (TCK), drive TDI per tdi */
	data = ((cx_read(MC417_RWD)) & (~0x0000a000));
	data |= (tdi ? 0x00008000 : 0);
	cx_write(MC417_RWD, data);
	if (read_tdo)
		tdo = (data & 0x00004000) ? 1 : 0; /* TDO, bit 14 */

	/* Pulse TCK (bit 13) high, hold 1us, then low */
	cx_write(MC417_RWD, data | 0x00002000);
	udelay(1);
	cx_write(MC417_RWD, data);

	return tdo;
}
void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
{
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_IR);
break;
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
}
}
/*
 * cx23885_card_setup() - per-board post-probe configuration.
 *
 * Reads the board EEPROM (when I2C bus 0 probed cleanly), programs the
 * TS port control values for each board variant, attaches the cx25840
 * A/V core sub-device where the board needs it, and performs
 * NetUP-specific initialization, including Altera FPGA configuration
 * over bit-banged JTAG (netup_jtag_io).
 */
void cx23885_card_setup(struct cx23885_dev *dev)
{
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	static u8 eeprom[256];

	/* Read the tveeprom once if the first I2C bus came up. */
	if (dev->i2c_bus[0].i2c_rc == 0) {
		dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
		tveeprom_read(&dev->i2c_bus[0].i2c_client,
			      eeprom, sizeof(eeprom));
	}

	/* Parse the Hauppauge EEPROM block where present. */
	switch (dev->board) {
	case CX23885_BOARD_HAUPPAUGE_HVR1250:
		if (dev->i2c_bus[0].i2c_rc == 0) {
			/* The tveeprom block lives at 0xc0 unless the
			 * 0x84 marker indicates the 0x80 layout. */
			if (eeprom[0x80] != 0x84)
				hauppauge_eeprom(dev, eeprom+0xc0);
			else
				hauppauge_eeprom(dev, eeprom+0x80);
		}
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1500:
	case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
	case CX23885_BOARD_HAUPPAUGE_HVR1400:
		if (dev->i2c_bus[0].i2c_rc == 0)
			hauppauge_eeprom(dev, eeprom+0x80);
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1800:
	case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
	case CX23885_BOARD_HAUPPAUGE_HVR1200:
	case CX23885_BOARD_HAUPPAUGE_HVR1700:
	case CX23885_BOARD_HAUPPAUGE_HVR1270:
	case CX23885_BOARD_HAUPPAUGE_HVR1275:
	case CX23885_BOARD_HAUPPAUGE_HVR1255:
	case CX23885_BOARD_HAUPPAUGE_HVR1210:
	case CX23885_BOARD_HAUPPAUGE_HVR1850:
	case CX23885_BOARD_HAUPPAUGE_HVR1290:
		if (dev->i2c_bus[0].i2c_rc == 0)
			hauppauge_eeprom(dev, eeprom+0xc0);
		break;
	}

	/* Program TS port control values per board. */
	switch (dev->board) {
	case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
	case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
		ts2->gen_ctrl_val = 0xc;	/* Serial bus + punctured clock */
		ts2->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		/* break omitted intentionally */
	case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
		ts1->gen_ctrl_val = 0xc;	/* Serial bus + punctured clock */
		ts1->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1850:
	case CX23885_BOARD_HAUPPAUGE_HVR1800:
		/* Defaults for VID B - Analog encoder */
		/* DREQ_POL, SMODE, PUNC_CLK, MCLK_POL Serial bus + punc clk */
		ts1->gen_ctrl_val = 0x10e;
		ts1->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		/* APB_TSVALERR_POL (active low)*/
		ts1->vld_misc_val = 0x2000;
		ts1->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4 | 0xc);
		cx_write(0x130184, 0xc);
		/* Defaults for VID C */
		ts2->gen_ctrl_val = 0xc;	/* Serial bus + punctured clock */
		ts2->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		break;
	case CX23885_BOARD_TBS_6920:
		ts1->gen_ctrl_val = 0x4;	/* Parallel */
		ts1->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		break;
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_DVBWORLD_2005:
		ts1->gen_ctrl_val = 0x5;	/* Parallel */
		ts1->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		break;
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
	case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
		ts1->gen_ctrl_val = 0xc;	/* Serial bus + punctured clock */
		ts1->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		ts2->gen_ctrl_val = 0xc;	/* Serial bus + punctured clock */
		ts2->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		break;
	case CX23885_BOARD_MYGICA_X8506:
	case CX23885_BOARD_MAGICPRO_PROHDTVE2:
		ts1->gen_ctrl_val = 0x5;	/* Parallel */
		ts1->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		break;
	case CX23885_BOARD_MYGICA_X8558PRO:
		ts1->gen_ctrl_val = 0x5;	/* Parallel */
		ts1->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		ts2->gen_ctrl_val = 0xc;	/* Serial bus + punctured clock */
		ts2->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
		break;
	case CX23885_BOARD_HAUPPAUGE_HVR1250:
	case CX23885_BOARD_HAUPPAUGE_HVR1500:
	case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
	case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
	case CX23885_BOARD_HAUPPAUGE_HVR1200:
	case CX23885_BOARD_HAUPPAUGE_HVR1700:
	case CX23885_BOARD_HAUPPAUGE_HVR1400:
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
	case CX23885_BOARD_HAUPPAUGE_HVR1270:
	case CX23885_BOARD_HAUPPAUGE_HVR1275:
	case CX23885_BOARD_HAUPPAUGE_HVR1255:
	case CX23885_BOARD_HAUPPAUGE_HVR1210:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
	case CX23885_BOARD_HAUPPAUGE_HVR1290:
	case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
	default:
		ts2->gen_ctrl_val = 0xc;	/* Serial bus + punctured clock */
		ts2->ts_clk_en_val = 0x1;	/* Enable TS_CLK */
		ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
	}

	/* Certain boards support analog, or require the avcore to be
	 * loaded, ensure this happens.
	 */
	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_HAUPPAUGE_HVR1250:
		/* Currently only enabled for the integrated IR controller */
		if (!enable_885_ir)
			break;
	case CX23885_BOARD_HAUPPAUGE_HVR1800:
	case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
	case CX23885_BOARD_HAUPPAUGE_HVR1700:
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
	case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
	case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
	case CX23885_BOARD_HAUPPAUGE_HVR1270:
	case CX23885_BOARD_HAUPPAUGE_HVR1850:
	case CX23885_BOARD_MYGICA_X8506:
	case CX23885_BOARD_MAGICPRO_PROHDTVE2:
	case CX23885_BOARD_HAUPPAUGE_HVR1290:
	case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
	case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
	case CX23885_BOARD_HAUPPAUGE_HVR1500:
	case CX23885_BOARD_MPX885:
	case CX23885_BOARD_MYGICA_X8507:
	case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
		/* Attach the cx25840 A/V core on I2C bus 2 and load
		 * its firmware. */
		dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
				&dev->i2c_bus[2].i2c_adap,
				"cx25840", 0x88 >> 1, NULL);
		if (dev->sd_cx25840) {
			dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
			v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
		}
		break;
	}

	/* AUX-PLL 27MHz CLK */
	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		netup_initialize(dev);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: {
		int ret;
		const struct firmware *fw;
		const char *filename = "dvb-netup-altera-01.fw";
		char *action = "configure";
		static struct netup_card_info cinfo;
		struct altera_config netup_config = {
			.dev = dev,
			.action = action,
			.jtag_io = netup_jtag_io,
		};

		netup_initialize(dev);

		/* Pick the FPGA bitstream matching the card revision
		 * (optionally overridden by the netup_card_rev param). */
		netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
		if (netup_card_rev)
			cinfo.rev = netup_card_rev;

		switch (cinfo.rev) {
		case 0x4:
			filename = "dvb-netup-altera-04.fw";
			break;
		default:
			filename = "dvb-netup-altera-01.fw";
			break;
		}
		printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
				cinfo.rev, filename);

		ret = request_firmware(&fw, filename, &dev->pci->dev);
		if (ret != 0)
			printk(KERN_ERR "did not find the firmware file. (%s) "
			"Please see linux/Documentation/dvb/ for more details "
			"on firmware-problems.", filename);
		else
			altera_init(&netup_config, fw);

		/* NOTE(review): release is unconditional; request_firmware()
		 * is expected to leave fw NULL on failure so this is a
		 * no-op then — confirm against the firmware API. */
		release_firmware(fw);
		break;
	}
	}
}
/* ------------------------------------------------------------------ */
| gpl-2.0 |
AICP/kernel_xiaomi_cancro | arch/arm/mach-at91/leds.c | 5132 | 3691 | /*
* LED driver for Atmel AT91-based boards.
*
* Copyright (C) SAN People (Pty) Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/board.h>
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_NEW_LEDS)
/*
* New cross-platform LED support.
*/
static struct gpio_led_platform_data led_data;
static struct platform_device at91_gpio_leds_device = {
.name = "leds-gpio",
.id = -1,
.dev.platform_data = &led_data,
};
/*
 * Register NR GPIO-connected LEDs with the generic "leds-gpio" driver.
 * Each line is configured as a GPIO output whose idle level is the
 * LED's active_low setting, then the shared platform device is
 * registered with the table attached.
 */
void __init at91_gpio_leds(struct gpio_led *leds, int nr)
{
	int idx;

	if (nr == 0)
		return;

	for (idx = 0; idx < nr; idx++)
		at91_set_gpio_output(leds[idx].gpio, leds[idx].active_low);

	led_data.leds = leds;
	led_data.num_leds = nr;
	platform_device_register(&at91_gpio_leds_device);
}
#else
void __init at91_gpio_leds(struct gpio_led *leds, int nr) {}
#endif
/* ------------------------------------------------------------------------- */
#if defined (CONFIG_LEDS_ATMEL_PWM)
/*
* PWM Leds
*/
static struct gpio_led_platform_data pwm_led_data;
static struct platform_device at91_pwm_leds_device = {
.name = "leds-atmel-pwm",
.id = -1,
.dev.platform_data = &pwm_led_data,
};
/*
 * Register NR PWM-driven LEDs with the "leds-atmel-pwm" driver.
 *
 * NOTE(review): each leds[i].gpio is used as a bit index into a 32-bit
 * PWM channel mask — presumably it holds a PWM channel number (< 32)
 * rather than a raw GPIO number; confirm against the board files.
 */
void __init at91_pwm_leds(struct gpio_led *leds, int nr)
{
	int i;
	u32 pwm_mask = 0;

	if (!nr)
		return;

	/* Collect the PWM channels that must be powered up. */
	for (i = 0; i < nr; i++)
		pwm_mask |= (1 << leds[i].gpio);

	pwm_led_data.leds = leds;
	pwm_led_data.num_leds = nr;

	at91_add_device_pwm(pwm_mask);
	platform_device_register(&at91_pwm_leds_device);
}
#else
void __init at91_pwm_leds(struct gpio_led *leds, int nr){}
#endif
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_LEDS)
#include <asm/leds.h>
/*
* Old ARM-specific LED framework; not fully functional when generic time is
* in use.
*/
static u8 at91_leds_cpu;
static u8 at91_leds_timer;
/* The LEDs are wired active-low: driving the GPIO to 0 lights the LED. */
static inline void at91_led_on(unsigned int led)
{
	at91_set_gpio_value(led, 0);
}

static inline void at91_led_off(unsigned int led)
{
	at91_set_gpio_value(led, 1);
}

/* Invert the LED by reading the current GPIO level and writing back
 * its complement (high level == LED off, see above). */
static inline void at91_led_toggle(unsigned int led)
{
	unsigned long is_off = at91_get_gpio_value(led);

	if (is_off)
		at91_led_on(led);
	else
		at91_led_off(led);
}
/*
* Handle LED events.
*/
/*
 * Handle LED events from the legacy ARM LED framework.  Local
 * interrupts are disabled for the duration so the GPIO accesses for
 * one event are not interleaved with another event on this CPU.
 */
static void at91_leds_event(led_event_t evt)
{
	unsigned long flags;

	local_irq_save(flags);

	switch(evt) {
	case led_start:		/* System startup */
		at91_led_on(at91_leds_cpu);
		break;

	case led_stop:		/* System stop / suspend */
		at91_led_off(at91_leds_cpu);
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:		/* Every 50 timer ticks */
		at91_led_toggle(at91_leds_timer);
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:	/* Entering idle state */
		at91_led_off(at91_leds_cpu);
		break;

	case led_idle_end:	/* Exit idle state */
		at91_led_on(at91_leds_cpu);
		break;
#endif

	default:		/* Other events are ignored */
		break;
	}

	local_irq_restore(flags);
}
/*
 * Install the LED event handler once both LED pins have been recorded
 * by at91_init_leds(), and emit the initial led_start event.
 *
 * NOTE(review): a pin number of 0 is treated as "not configured", so
 * GPIO 0 can never be used for either LED — confirm this is intended.
 */
static int __init leds_init(void)
{
	if (!at91_leds_timer || !at91_leds_cpu)
		return -ENODEV;

	leds_event = at91_leds_event;

	leds_event(led_start);
	return 0;
}
__initcall(leds_init);
/*
 * Record the CPU and timer LED pins for the legacy LED framework and
 * configure both lines as GPIO outputs, initially driven high (i.e.
 * LED off, since the LEDs are active-low).
 */
void __init at91_init_leds(u8 cpu_led, u8 timer_led)
{
	at91_leds_cpu = cpu_led;
	at91_leds_timer = timer_led;

	/* Enable GPIO control over both LED lines. */
	at91_set_gpio_output(cpu_led, 1);
	at91_set_gpio_output(timer_led, 1);
}
#else
void __init at91_init_leds(u8 cpu_led, u8 timer_led) {}
#endif
| gpl-2.0 |
KaSt/Kappa34 | arch/sparc/kernel/chmc.c | 7436 | 20646 | /* chmc.c: Driver for UltraSPARC-III memory controller.
*
* Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/spitfire.h>
#include <asm/chmctrl.h>
#include <asm/cpudata.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/head.h>
#include <asm/io.h>
#include <asm/memctrl.h>
#define DRV_MODULE_NAME "chmc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.2"
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("UltraSPARC-III memory controller driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int mc_type;
#define MC_TYPE_SAFARI 1
#define MC_TYPE_JBUS 2
static dimm_printer_t us3mc_dimm_printer;
#define CHMCTRL_NDGRPS 2
#define CHMCTRL_NDIMMS 4
#define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)
/* OBP memory-layout property format. */
struct chmc_obp_map {
unsigned char dimm_map[144];
unsigned char pin_map[576];
};
#define DIMM_LABEL_SZ 8
struct chmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct chmc_obp_map map[2];
};
#define CHMCTRL_NBANKS 4
struct chmc_bank_info {
struct chmc *p;
int bank_id;
u64 raw_reg;
int valid;
int uk;
int um;
int lk;
int lm;
int interleave;
unsigned long base;
unsigned long size;
};
struct chmc {
struct list_head list;
int portid;
struct chmc_obp_mem_layout layout_prop;
int layout_size;
void __iomem *regs;
u64 timing_control1;
u64 timing_control2;
u64 timing_control3;
u64 timing_control4;
u64 memaddr_control;
struct chmc_bank_info logical_banks[CHMCTRL_NBANKS];
};
#define JBUSMC_REGS_SIZE 8
#define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL
#define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL
#define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL
#define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL
#define JB_MC_REG1_XOR 0x0000010000000000UL
#define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL
#define JB_MC_REG1_ADDR_GEN_2_SHIFT 37
#define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL
#define JB_MC_REG1_ADDR_GEN_1_SHIFT 34
#define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL
#define JB_MC_REG1_INTERLEAVE_SHIFT 23
#define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL
#define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21
#define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL
#define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20
#define PART_TYPE_X8 0
#define PART_TYPE_X4 1
#define INTERLEAVE_NONE 0
#define INTERLEAVE_SAME 1
#define INTERLEAVE_INTERNAL 2
#define INTERLEAVE_BOTH 3
#define ADDR_GEN_128MB 0
#define ADDR_GEN_256MB 1
#define ADDR_GEN_512MB 2
#define ADDR_GEN_1GB 3
#define JB_NUM_DIMM_GROUPS 2
#define JB_NUM_DIMMS_PER_GROUP 2
#define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP)
struct jbusmc_obp_map {
unsigned char dimm_map[18];
unsigned char pin_map[144];
};
struct jbusmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct jbusmc_obp_map map;
char _pad;
};
struct jbusmc_dimm_group {
struct jbusmc *controller;
int index;
u64 base_addr;
u64 size;
};
struct jbusmc {
void __iomem *regs;
u64 mc_reg_1;
u32 portid;
struct jbusmc_obp_mem_layout layout;
int layout_len;
int num_dimm_groups;
struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS];
struct list_head list;
};
static DEFINE_SPINLOCK(mctrl_list_lock);
static LIST_HEAD(mctrl_list);
/* Add a memory controller entry to the global list, under the lock. */
static void mc_list_add(struct list_head *list)
{
	spin_lock(&mctrl_list_lock);
	list_add(list, &mctrl_list);
	spin_unlock(&mctrl_list_lock);
}

/* Unlink a memory controller entry from the global list, under the lock. */
static void mc_list_del(struct list_head *list)
{
	spin_lock(&mctrl_list_lock);
	list_del_init(list);
	spin_unlock(&mctrl_list_lock);
}
#define SYNDROME_MIN -1
#define SYNDROME_MAX 144
/* Covert syndrome code into the way the bits are positioned
* on the bus.
*/
/*
 * Map an ECC syndrome code onto the bit position within the 144-bit
 * quadword as it appears on the bus.  The four input ranges
 * [0,127], [128,136], [137,139] and [140,...] each get a fixed offset.
 */
static int syndrome_to_qword_code(int syndrome_code)
{
	int qword_code;

	if (syndrome_code < 128)
		qword_code = syndrome_code + 16;
	else if (syndrome_code < 137)
		qword_code = syndrome_code - 121;
	else if (syndrome_code < 140)
		qword_code = syndrome_code - 133;
	else
		qword_code = syndrome_code - 140;

	return qword_code;
}
/* All this magic has to do with how a cache line comes over the wire
* on Safari and JBUS. A 64-bit line comes over in 1 or more quadword
* cycles, each of which transmit ECC/MTAG info as well as the actual
* data.
*/
#define L2_LINE_SIZE 64
#define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1)
#define QW_PER_LINE 4
#define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE)
#define QW_BITS 144
#define SAFARI_LAST_BIT (576 - 1)
#define JBUS_LAST_BIT (144 - 1)
/*
 * Translate an ECC syndrome code plus physical address into a DIMM
 * label string and pin number via the OBP "memory-layout" property.
 * _prop points at a jbusmc_obp_mem_layout or chmc_obp_mem_layout
 * depending on the global mc_type; base_dimm_offset selects the group
 * of DIMM labels that applies to the failing bank.
 */
static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr,
				 int *pin_p, char **dimm_str_p, void *_prop,
				 int base_dimm_offset)
{
	int qword_code = syndrome_to_qword_code(syndrome_code);
	int cache_line_offset;
	int offset_inverse;
	int dimm_map_index;
	int map_val;

	if (mc_type == MC_TYPE_JBUS) {
		struct jbusmc_obp_mem_layout *p = _prop;

		/* JBUS: 144-bit line, 1-bit map entries (8 per byte) */
		cache_line_offset = qword_code;
		offset_inverse = (JBUS_LAST_BIT - cache_line_offset);
		dimm_map_index = offset_inverse / 8;
		map_val = p->map.dimm_map[dimm_map_index];
		map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1);
		*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
		*pin_p = p->map.pin_map[cache_line_offset];
	} else {
		struct chmc_obp_mem_layout *p = _prop;
		struct chmc_obp_map *mp;
		int qword;

		/* Safari: 576-bit line, 2-bit map entries (4 per byte) */
		if (p->symmetric)
			mp = &p->map[0];
		else
			mp = &p->map[1];

		/* Which of the 4 quadword beats carried this address */
		qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES;
		cache_line_offset = ((3 - qword) * QW_BITS) + qword_code;
		offset_inverse = (SAFARI_LAST_BIT - cache_line_offset);
		dimm_map_index = offset_inverse >> 2;
		map_val = mp->dimm_map[dimm_map_index];
		map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3);
		*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
		*pin_p = mp->pin_map[cache_line_offset];
	}
}
/*
 * Find the JBUS DIMM group whose address window contains phys_addr.
 * Returns NULL when no registered controller decodes the address.
 */
static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr)
{
	struct jbusmc *mc;

	list_for_each_entry(mc, &mctrl_list, list) {
		int grp;

		for (grp = 0; grp < mc->num_dimm_groups; grp++) {
			struct jbusmc_dimm_group *dg = &mc->dimm_groups[grp];

			if (phys_addr >= dg->base_addr &&
			    phys_addr < dg->base_addr + dg->size)
				return dg;
		}
	}
	return NULL;
}
/*
 * Format the DIMM label(s) responsible for an ECC error at phys_addr
 * on a JBUS controller into buf.  syndrome_code == SYNDROME_MIN marks
 * a multi-bit error, for which every label of the DIMM group is
 * printed; an unknown address or out-of-range syndrome yields "???".
 *
 * NOTE(review): buflen is accepted but never used to bound the
 * sprintf() calls — callers must pass a sufficiently large buffer.
 */
static int jbusmc_print_dimm(int syndrome_code,
			     unsigned long phys_addr,
			     char *buf, int buflen)
{
	struct jbusmc_obp_mem_layout *prop;
	struct jbusmc_dimm_group *dp;
	struct jbusmc *p;
	int first_dimm;

	dp = jbusmc_find_dimm_group(phys_addr);
	if (dp == NULL ||
	    syndrome_code < SYNDROME_MIN ||
	    syndrome_code > SYNDROME_MAX) {
		buf[0] = '?';
		buf[1] = '?';
		buf[2] = '?';
		buf[3] = '\0';
		return 0;
	}
	p = dp->controller;
	prop = &p->layout;

	first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP;

	if (syndrome_code != SYNDROME_MIN) {
		char *dimm_str;
		int pin;

		get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
				     &dimm_str, prop, first_dimm);
		sprintf(buf, "%s, pin %3d", dimm_str, pin);
	} else {
		int dimm;

		/* Multi-bit error, we just dump out all the
		 * dimm labels associated with this dimm group.
		 */
		for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) {
			sprintf(buf, "%s ",
				prop->dimm_labels[first_dimm + dimm]);
			buf += strlen(buf);
		}
	}
	return 0;
}
/*
 * Compute how much of a DIMM group's 8GB window starting at `base` is
 * actually populated, according to the OBP /memory "reg" entries.
 * Only regions that contain `base` are considered, and each is clipped
 * to the 8GB window; the result is the span from base to the highest
 * covered address.
 */
static u64 __devinit jbusmc_dimm_group_size(u64 base,
					    const struct linux_prom64_registers *mem_regs,
					    int num_mem_regs)
{
	u64 window_end = base + (8UL * 1024 * 1024 * 1024);
	u64 highest = base;
	int idx;

	for (idx = 0; idx < num_mem_regs; idx++) {
		u64 start = mem_regs[idx].phys_addr;
		u64 end = start + mem_regs[idx].reg_size;

		if (base >= start && base < end) {
			if (end > window_end)
				end = window_end;
			if (end > highest)
				highest = end;
		}
	}

	return highest - base;
}
/*
 * Fill in dimm_groups[index]: each JBUS port owns a 64GB address
 * window and each DIMM group covers an 8GB slice of it; the usable
 * size is whatever the OBP /memory "reg" property actually reports
 * inside that slice.
 */
static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p,
						      unsigned long index,
						      const struct linux_prom64_registers *mem_regs,
						      int num_mem_regs)
{
	struct jbusmc_dimm_group *dp = &p->dimm_groups[index];

	dp->controller = p;
	dp->index = index;

	dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024));
	dp->base_addr += (index * (8UL * 1024 * 1024 * 1024));
	dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs);
}
/*
 * Instantiate a DIMM group for each populated bank pair, as indicated
 * by the DIMM1/DIMM2 bank-present bits of memory control register 1.
 */
static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p,
						   const struct linux_prom64_registers *mem_regs,
						   int num_mem_regs)
{
	if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) {
		jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs);
		p->num_dimm_groups++;
	}
	if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) {
		jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs);
		p->num_dimm_groups++;
	}
}
/*
 * Probe one JBUS memory controller node: gather the /memory layout,
 * read the portid and control-register properties, map the registers,
 * copy the OBP "memory-layout" blob, build the DIMM groups and add
 * the controller to the global list.  Uses goto-based cleanup so each
 * failure path releases exactly what was acquired.
 */
static int __devinit jbusmc_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *mem_regs;
	struct device_node *mem_node;
	int err, len, num_mem_regs;
	struct jbusmc *p;
	const u32 *prop;
	const void *ml;

	err = -ENODEV;
	mem_node = of_find_node_by_path("/memory");
	if (!mem_node) {
		printk(KERN_ERR PFX "Cannot find /memory node.\n");
		goto out;
	}
	mem_regs = of_get_property(mem_node, "reg", &len);
	if (!mem_regs) {
		printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n");
		goto out;
	}
	num_mem_regs = len / sizeof(*mem_regs);

	err = -ENOMEM;
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n");
		goto out;
	}

	INIT_LIST_HEAD(&p->list);

	err = -ENODEV;
	prop = of_get_property(op->dev.of_node, "portid", &len);
	if (!prop || len != 4) {
		printk(KERN_ERR PFX "Cannot find portid.\n");
		goto out_free;
	}

	p->portid = *prop;

	prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len);
	if (!prop || len != 8) {
		printk(KERN_ERR PFX "Cannot get memory control register 1.\n");
		goto out_free;
	}

	/* The 64-bit register value arrives as two 32-bit cells. */
	p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1];

	err = -ENOMEM;
	p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc");
	if (!p->regs) {
		printk(KERN_ERR PFX "Cannot map jbusmc regs.\n");
		goto out_free;
	}

	err = -ENODEV;
	ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len);
	if (!ml) {
		printk(KERN_ERR PFX "Cannot get memory layout property.\n");
		goto out_iounmap;
	}
	if (p->layout_len > sizeof(p->layout)) {
		printk(KERN_ERR PFX "Unexpected memory-layout size %d\n",
		       p->layout_len);
		goto out_iounmap;
	}
	memcpy(&p->layout, ml, p->layout_len);

	jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs);

	mc_list_add(&p->list);

	printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n",
	       op->dev.of_node->full_name);

	dev_set_drvdata(&op->dev, p);

	err = 0;

out:
	return err;

out_iounmap:
	of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);

out_free:
	kfree(p);
	goto out;
}
/* Does BANK decode PHYS_ADDR? */
/*
 * Helper: does `bits` agree with `match` on every bit position that is
 * NOT excused by the `dont_care` mask?  Implements the same
 * xor/invert/or/invert sequence the decode hardware specifies.
 */
static int chmc_addr_bits_match(unsigned long bits, int match, int dont_care)
{
	bits ^= match;		/* which bits differ? */
	bits = ~bits;		/* invert */
	bits |= dont_care;	/* bits that don't matter for matching */
	bits = ~bits;		/* non-zero => mismatch in a significant bit */
	return bits == 0;
}

/* Does BANK decode PHYS_ADDR? */
static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr)
{
	unsigned long upper = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
	unsigned long lower = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;

	/* Bank must be enabled to match. */
	if (bp->valid == 0)
		return 0;

	/* Both the upper and the lower address fields must decode. */
	return chmc_addr_bits_match(upper, bp->um, bp->uk) &&
	       chmc_addr_bits_match(lower, bp->lm, bp->lk);
}
/* Given PHYS_ADDR, search memory controller banks for a match. */
/*
 * Search every registered Safari memory controller for the logical
 * bank whose decode registers match PHYS_ADDR; NULL when none does.
 */
static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr)
{
	struct chmc *mc;

	list_for_each_entry(mc, &mctrl_list, list) {
		int bank;

		for (bank = 0; bank < CHMCTRL_NBANKS; bank++) {
			struct chmc_bank_info *info = &mc->logical_banks[bank];

			if (chmc_bank_match(info, phys_addr))
				return info;
		}
	}
	return NULL;
}
/* This is the main purpose of this driver. */
/*
 * This is the main purpose of this driver: format the DIMM label(s)
 * responsible for an ECC error at phys_addr into buf.  As in
 * jbusmc_print_dimm(), SYNDROME_MIN marks a multi-bit error (all
 * labels of the bank are listed) and unknown addresses yield "???".
 *
 * NOTE(review): buflen is accepted but never used to bound the
 * sprintf() calls — callers must pass a sufficiently large buffer.
 */
static int chmc_print_dimm(int syndrome_code,
			   unsigned long phys_addr,
			   char *buf, int buflen)
{
	struct chmc_bank_info *bp;
	struct chmc_obp_mem_layout *prop;
	int bank_in_controller, first_dimm;

	bp = chmc_find_bank(phys_addr);
	if (bp == NULL ||
	    syndrome_code < SYNDROME_MIN ||
	    syndrome_code > SYNDROME_MAX) {
		buf[0] = '?';
		buf[1] = '?';
		buf[2] = '?';
		buf[3] = '\0';
		return 0;
	}

	prop = &bp->p->layout_prop;

	/* Map the global bank id to a DIMM-label group on this board. */
	bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
	first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
	first_dimm *= CHMCTRL_NDIMMS;

	if (syndrome_code != SYNDROME_MIN) {
		char *dimm_str;
		int pin;

		get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
				     &dimm_str, prop, first_dimm);
		sprintf(buf, "%s, pin %3d", dimm_str, pin);
	} else {
		int dimm;

		/* Multi-bit error, we just dump out all the
		 * dimm labels associated with this bank.
		 */
		for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
			sprintf(buf, "%s ",
				prop->dimm_labels[first_dimm + dimm]);
			buf += strlen(buf);
		}
	}
	return 0;
}
/* Accessing the registers is slightly complicated. If you want
* to get at the memory controller which is on the same processor
* the code is executing, you must use special ASI load/store else
* you go through the global mapping.
*/
/*
 * Read a memory controller register.  The MC belonging to the CPU we
 * are running on must be accessed via the ASI_MCU_CTRL_REG address
 * space; any other MC goes through the global physical mapping.
 * Preemption is disabled so the portid comparison and the ldxa are
 * guaranteed to happen on the same CPU.
 */
static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset)
{
	unsigned long ret, this_cpu;

	preempt_disable();

	this_cpu = real_hard_smp_processor_id();

	if (p->portid == this_cpu) {
		/* Local controller: special ASI load */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		/* Remote controller: physical bypass mapping */
		__asm__ __volatile__("ldxa [%1] %2, %0"
				     : "=r" (ret)
				     : "r" (p->regs + offset),
				       "i" (ASI_PHYS_BYPASS_EC_E));
	}

	preempt_enable();

	return ret;
}
#if 0 /* currently unused */
/*
 * Write a memory controller register; mirror image of
 * chmc_read_mcreg(): the local MC is reached via ASI_MCU_CTRL_REG,
 * remote ones through the physical bypass mapping.
 *
 * Bug fix: the remote branch used the "ldxa" (load) mnemonic in this
 * write routine; both branches must use "stxa" (store).
 *
 * NOTE(review): unlike chmc_read_mcreg() this does not disable
 * preemption around the smp_processor_id() check — if this #if 0 code
 * is ever revived, add preempt_disable()/preempt_enable().
 */
static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val)
{
	if (p->portid == smp_processor_id()) {
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
					 "r" (offset), "i" (ASI_MCU_CTRL_REG));
	} else {
		__asm__ __volatile__("stxa %0, [%1] %2"
				     : : "r" (val),
					 "r" (p->regs + offset),
					 "i" (ASI_PHYS_BYPASS_EC_E));
	}
}
#endif
/*
 * Decode one CHMCTRL_DECODEn register value into the corresponding
 * logical bank record: validity flag, the upper/lower match ("um/lm")
 * and don't-care mask ("uk/lk") fields, the decoded base address, the
 * interleave factor and the bank size.
 */
static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val)
{
	struct chmc_bank_info *bp = &p->logical_banks[which_bank];

	bp->p = p;
	bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank;
	bp->raw_reg = val;
	bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
	bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
	bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
	bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
	bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;

	/* Base = upper match bits with the don't-care bits masked off. */
	bp->base = (bp->um);
	bp->base &= ~(bp->uk);
	bp->base <<= PA_UPPER_BITS_SHIFT;

	/* The lower mask encodes the interleave factor. */
	switch(bp->lk) {
	case 0xf:
	default:
		bp->interleave = 1;
		break;

	case 0xe:
		bp->interleave = 2;
		break;

	case 0xc:
		bp->interleave = 4;
		break;

	case 0x8:
		bp->interleave = 8;
		break;

	case 0x0:
		bp->interleave = 16;
		break;
	}

	/* UK[10] is reserved, and UK[11] is not set for the SDRAM
	 * bank size definition.
	 */
	bp->size = (((unsigned long)bp->uk &
		     ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
	bp->size /= bp->interleave;
}
static void chmc_fetch_decode_regs(struct chmc *p)
{
if (p->layout_size == 0)
return;
chmc_interpret_one_decode_reg(p, 0,
chmc_read_mcreg(p, CHMCTRL_DECODE1));
chmc_interpret_one_decode_reg(p, 1,
chmc_read_mcreg(p, CHMCTRL_DECODE2));
chmc_interpret_one_decode_reg(p, 2,
chmc_read_mcreg(p, CHMCTRL_DECODE3));
chmc_interpret_one_decode_reg(p, 3,
chmc_read_mcreg(p, CHMCTRL_DECODE4));
}
/*
 * Probe one Safari (UltraSPARC-III) memory controller node: bail out
 * on JBUS CPUs (Jalapeno/Serrano, handled by jbusmc_probe), read the
 * portid and optional "memory-layout" property, map the registers,
 * snapshot the timing registers and decode the banks, then add the
 * controller to the global list.
 */
static int __devinit chmc_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	unsigned long ver;
	const void *pval;
	int len, portid;
	struct chmc *p;
	int err;

	err = -ENODEV;
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32UL) == __JALAPENO_ID ||
	    (ver >> 32UL) == __SERRANO_ID)
		goto out;

	portid = of_getintprop_default(dp, "portid", -1);
	if (portid == -1)
		goto out;

	pval = of_get_property(dp, "memory-layout", &len);
	if (pval && len > sizeof(p->layout_prop)) {
		printk(KERN_ERR PFX "Unexpected memory-layout property "
		       "size %d.\n", len);
		goto out;
	}

	err = -ENOMEM;
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR PFX "Could not allocate struct chmc.\n");
		goto out;
	}

	p->portid = portid;
	p->layout_size = len;
	/* Absent layout: the controller is present but inactive. */
	if (!pval)
		p->layout_size = 0;
	else
		memcpy(&p->layout_prop, pval, len);

	p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc");
	if (!p->regs) {
		printk(KERN_ERR PFX "Could not map registers.\n");
		goto out_free;
	}

	if (p->layout_size != 0UL) {
		p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1);
		p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2);
		p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3);
		p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4);
		p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL);
	}

	chmc_fetch_decode_regs(p);

	mc_list_add(&p->list);

	printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n",
	       dp->full_name,
	       (p->layout_size ? "ACTIVE" : "INACTIVE"));

	dev_set_drvdata(&op->dev, p);

	err = 0;

out:
	return err;

out_free:
	kfree(p);
	goto out;
}
/* Dispatch a memory-controller probe to the flavour detected at
 * module init (Safari vs JBUS).
 */
static int __devinit us3mc_probe(struct platform_device *op)
{
	switch (mc_type) {
	case MC_TYPE_SAFARI:
		return chmc_probe(op);
	case MC_TYPE_JBUS:
		return jbusmc_probe(op);
	default:
		return -ENODEV;
	}
}
/* Tear down one Safari controller: unlink it from the global MC list,
 * unmap its 0x48-byte register window and free the state.
 */
static void __devexit chmc_destroy(struct platform_device *op, struct chmc *p)
{
	list_del(&p->list);
	of_iounmap(&op->resource[0], p->regs, 0x48);
	kfree(p);
}
/* Tear down one JBUS controller: remove it from the MC list, unmap
 * its register window and free the state.
 */
static void __devexit jbusmc_destroy(struct platform_device *op, struct jbusmc *p)
{
	mc_list_del(&p->list);
	of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
	kfree(p);
}
/* Platform-driver remove hook: destroy whichever controller type was
 * attached to this device, if any.
 */
static int __devexit us3mc_remove(struct platform_device *op)
{
	void *p = dev_get_drvdata(&op->dev);

	if (!p)
		return 0;

	if (mc_type == MC_TYPE_SAFARI)
		chmc_destroy(op, p);
	else if (mc_type == MC_TYPE_JBUS)
		jbusmc_destroy(op, p);

	return 0;
}
/* Match any OF node named "memory-controller"; us3mc_probe() then
 * dispatches on the globally detected mc_type. */
static const struct of_device_id us3mc_match[] = {
	{
		.name = "memory-controller",
	},
	{},
};
MODULE_DEVICE_TABLE(of, us3mc_match);

static struct platform_driver us3mc_driver = {
	.driver = {
		.name = "us3mc",
		.owner = THIS_MODULE,
		.of_match_table = us3mc_match,
	},
	.probe = us3mc_probe,
	.remove = __devexit_p(us3mc_remove),
};
/* True on cheetah/cheetah_plus (UltraSPARC-III family) CPUs, the only
 * platforms this memory-controller driver supports.
 */
static inline bool us3mc_platform(void)
{
	return (tlb_type == cheetah) || (tlb_type == cheetah_plus);
}
/* Module init: detect the memory-controller flavour from the CPU
 * version register (Jalapeno/Serrano => JBUS, other UltraSPARC-III =>
 * Safari), install the matching DIMM pretty-printer and register the
 * platform driver.  Order matters: the printer must be registered
 * before any probe can use it, and is unwound if driver registration
 * fails.
 */
static int __init us3mc_init(void)
{
	unsigned long ver;
	int ret;

	if (!us3mc_platform())
		return -ENODEV;

	__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32UL) == __JALAPENO_ID ||
	    (ver >> 32UL) == __SERRANO_ID) {
		mc_type = MC_TYPE_JBUS;
		us3mc_dimm_printer = jbusmc_print_dimm;
	} else {
		mc_type = MC_TYPE_SAFARI;
		us3mc_dimm_printer = chmc_print_dimm;
	}
	ret = register_dimm_printer(us3mc_dimm_printer);
	if (!ret) {
		ret = platform_driver_register(&us3mc_driver);
		if (ret)
			unregister_dimm_printer(us3mc_dimm_printer);
	}
	return ret;
}
/* Module exit: undo us3mc_init() on supported platforms. */
static void __exit us3mc_cleanup(void)
{
	if (!us3mc_platform())
		return;

	unregister_dimm_printer(us3mc_dimm_printer);
	platform_driver_unregister(&us3mc_driver);
}
module_init(us3mc_init);
module_exit(us3mc_cleanup);
| gpl-2.0 |
khanfrd/Red-Kernel | drivers/isdn/hardware/eicon/s_4bri.c | 9740 | 15335 |
/*
*
Copyright (c) Eicon Networks, 2002.
*
This source file is supplied for the use with
Eicon Networks range of DIVA Server Adapters.
*
Eicon File Revision : 2.1
*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
*
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
*
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "platform.h"
#include "di_defs.h"
#include "pc.h"
#include "pr_pc.h"
#include "di.h"
#include "mi_pc.h"
#include "pc_maint.h"
#include "divasync.h"
#include "pc_init.h"
#include "io.h"
#include "helpers.h"
#include "dsrv4bri.h"
#include "dsp_defs.h"
#include "sdp_hdr.h"
/*****************************************************************************/
#define MAX_XLOG_SIZE (64 * 1024)
/* --------------------------------------------------------------------------
   Recovery XLOG from QBRI Card
   Called after a suspected firmware crash: if the trap ID published at
   controller offset 0x80 matches a known MIPS trap signature, dump the
   exception frame; then try to recover the firmware's XLOG ring buffer
   from shared memory for post-mortem analysis.  Sets
   IoAdapter->trapped to 1 (frame dumped) or 2 (XLOG recovered too).
   -------------------------------------------------------------------------- */
static void qBri_cpu_trapped(PISDN_ADAPTER IoAdapter) {
	byte __iomem *base;
	word *Xlog;
	dword regs[4], TrapID, offset, size;
	Xdesc xlogDesc;
	/* single-task (BRI 2) adapters own the whole memory window;
	   multi-task boards halve it per controller */
	int factor = (IoAdapter->tasks == 1) ? 1 : 2;
/*
 * check for trapped MIPS 46xx CPU, dump exception frame
 */
	base = DIVA_OS_MEM_ATTACH_CONTROL(IoAdapter);
	offset = IoAdapter->ControllerNumber * (IoAdapter->MemorySize >> factor);
	TrapID = READ_DWORD(&base[0x80]);
	if ((TrapID == 0x99999999) || (TrapID == 0x99999901))
	{
		dump_trap_frame(IoAdapter, &base[0x90]);
		IoAdapter->trapped = 1;
	}
	/* regs[0..2]: XLOG buffer pointer, count address and out-index
	   address published by the firmware at offsets 0x70..0x7c */
	regs[0] = READ_DWORD((base + offset) + 0x70);
	regs[1] = READ_DWORD((base + offset) + 0x74);
	regs[2] = READ_DWORD((base + offset) + 0x78);
	regs[3] = READ_DWORD((base + offset) + 0x7c);
	regs[0] &= IoAdapter->MemorySize - 1;
	/* only trust the buffer pointer if it lies inside this
	   controller's half of shared memory */
	if ((regs[0] >= offset)
	    && (regs[0] < offset + (IoAdapter->MemorySize >> factor) - 1))
	{
		if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE))) {
			DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
			return;
		}
		/* copy at most MAX_XLOG_SIZE bytes up to the window end */
		size = offset + (IoAdapter->MemorySize >> factor) - regs[0];
		if (size > MAX_XLOG_SIZE)
			size = MAX_XLOG_SIZE;
		memcpy_fromio(Xlog, &base[regs[0]], size);
		xlogDesc.buf = Xlog;
		xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]);
		xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]);
		dump_xlog_buffer(IoAdapter, &xlogDesc);
		diva_os_free(0, Xlog);
		IoAdapter->trapped = 2;
	}
	DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
}
/* --------------------------------------------------------------------------
   Reset QBRI Hardware
   Pulse the PLX 9054 soft-reset and EEPROM-reload bits (with 1 ms
   settle delays between each edge), then clear the RISC control
   register so the on-board CPU stays halted until explicitly started.
   -------------------------------------------------------------------------- */
static void reset_qBri_hardware(PISDN_ADAPTER IoAdapter) {
	word volatile __iomem *qBriReset;
	byte volatile __iomem *qBriCntrl;
	byte volatile __iomem *p;
	qBriReset = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
	WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_SOFT_RESET);
	diva_os_wait(1);
	WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_SOFT_RESET);
	diva_os_wait(1);
	WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_RELOAD_EEPROM);
	diva_os_wait(1);
	WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_RELOAD_EEPROM);
	diva_os_wait(1);
	DIVA_OS_MEM_DETACH_PROM(IoAdapter, qBriReset);
	/* hold the RISC in reset; the register offset moved between the
	   original 4BRI and revision-2 boards */
	qBriCntrl = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	p = &qBriCntrl[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
	WRITE_DWORD(p, 0);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, qBriCntrl);
	/* NOTE(review): the pointers below are already detached when
	   traced; they are only printed, never dereferenced */
	DBG_TRC(("resetted board @ reset addr 0x%08lx", qBriReset))
	DBG_TRC(("resetted board @ cntrl addr 0x%08lx", p))
}
/* --------------------------------------------------------------------------
   Start Card CPU
   Assert cold reset on the RISC, then release it into combined
   warm+cold run state and give the firmware 10 ms to begin executing.
   -------------------------------------------------------------------------- */
void start_qBri_hardware(PISDN_ADAPTER IoAdapter) {
	byte volatile __iomem *qBriReset;
	byte volatile __iomem *p;
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriReset = &p[(DIVA_4BRI_REVISION(IoAdapter)) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
	WRITE_DWORD(qBriReset, MQ_RISC_COLD_RESET_MASK);
	diva_os_wait(2);
	WRITE_DWORD(qBriReset, MQ_RISC_WARM_RESET_MASK | MQ_RISC_COLD_RESET_MASK);
	diva_os_wait(10);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
	/* pointer only printed, not dereferenced, after detach */
	DBG_TRC(("started processor @ addr 0x%08lx", qBriReset))
}
/* --------------------------------------------------------------------------
   Stop Card CPU
   Quiesce the adapter: zero the RISC control and ISAC/DSP reset
   registers, mask PCI interrupts at the PLX bridge, then clear any
   pending local interrupt request.  Only controller 0 owns the shared
   hardware, so secondary controllers return immediately.
   -------------------------------------------------------------------------- */
static void stop_qBri_hardware(PISDN_ADAPTER IoAdapter) {
	byte volatile __iomem *p;
	dword volatile __iomem *qBriReset;
	dword volatile __iomem *qBriIrq;
	dword volatile __iomem *qBriIsacDspReset;
	/* register layout differs between 4BRI revision 1 and 2 */
	int rev2 = DIVA_4BRI_REVISION(IoAdapter);
	int reset_offset = rev2 ? (MQ2_BREG_RISC) : (MQ_BREG_RISC);
	int irq_offset = rev2 ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST);
	int hw_offset = rev2 ? (MQ2_ISAC_DSP_RESET) : (MQ_ISAC_DSP_RESET);
	if (IoAdapter->ControllerNumber > 0)
		return;
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriReset = (dword volatile __iomem *)&p[reset_offset];
	qBriIsacDspReset = (dword volatile __iomem *)&p[hw_offset];
/*
 * clear interrupt line (reset Local Interrupt Test Register)
 */
	WRITE_DWORD(qBriReset, 0);
	WRITE_DWORD(qBriIsacDspReset, 0);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
	WRITE_BYTE(&p[PLX9054_INTCSR], 0x00);	/* disable PCI interrupts */
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriIrq = (dword volatile __iomem *)&p[irq_offset];
	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
	/* pointer only printed, not dereferenced, after detach */
	DBG_TRC(("stopped processor @ addr 0x%08lx", qBriReset))
}
/* --------------------------------------------------------------------------
   FPGA download
   -------------------------------------------------------------------------- */
#define FPGA_NAME_OFFSET 0x10

/* Load an FPGA bitstream file and sanity-check its header.
   On success returns the loaded file image (caller frees it with
   xdiFreeFile), sets *Length to the file size and *code to the offset
   of the 0xFF start-of-data marker.  Returns NULL on any format error.
   Side effect: parses the build date from the header and, for
   post-2000 builds on cards other than the B-2F, enables
   PCINIT_FPGA_PLX_ACCESS_SUPPORTED in fpga_features. */
static byte *qBri_check_FPGAsrc(PISDN_ADAPTER IoAdapter, char *FileName,
				dword *Length, dword *code) {
	byte *File;
	char *fpgaFile, *fpgaType, *fpgaDate, *fpgaTime;
	dword fpgaFlen, fpgaTlen, fpgaDlen, cnt, year, i;
	if (!(File = (byte *)xdiLoadFile(FileName, Length, 0))) {
		return (NULL);
	}
/*
 * scan file until FF and put id string into buffer
 */
	for (i = 0; File[i] != 0xff;)
	{
		if (++i >= *Length)
		{
			DBG_FTL(("FPGA download: start of data header not found"))
			xdiFreeFile(File);
			return (NULL);
		}
	}
	*code = i++;
	/* the nibble after the 0xFF marker must be 0x2x */
	if ((File[i] & 0xF0) != 0x20)
	{
		DBG_FTL(("FPGA download: data header corrupted"))
		xdiFreeFile(File);
		return (NULL);
	}
	/* extract the length-prefixed name/type/date/time strings; a
	   zero length byte falls back to historical fixed field widths */
	fpgaFlen = (dword)File[FPGA_NAME_OFFSET - 1];
	if (fpgaFlen == 0)
		fpgaFlen = 12;
	fpgaFile = (char *)&File[FPGA_NAME_OFFSET];
	fpgaTlen = (dword)fpgaFile[fpgaFlen + 2];
	if (fpgaTlen == 0)
		fpgaTlen = 10;
	fpgaType = (char *)&fpgaFile[fpgaFlen + 3];
	fpgaDlen = (dword) fpgaType[fpgaTlen + 2];
	if (fpgaDlen == 0)
		fpgaDlen = 11;
	fpgaDate = (char *)&fpgaType[fpgaTlen + 3];
	fpgaTime = (char *)&fpgaDate[fpgaDlen + 3];
	/* bit count of the data stream, packed into 3.5 bytes after the
	   header nibble */
	cnt = (dword)(((File[i] & 0x0F) << 20) + (File[i + 1] << 12)
		      + (File[i + 2] << 4) + (File[i + 3] >> 4));
	if ((dword)(i + (cnt / 8)) > *Length)
	{
		/* NOTE(review): the second %ld argument performs pointer
		   arithmetic on 'code' (a dword *); it was presumably
		   meant to be i + (cnt / 8).  Debug output only — the
		   failure detection itself is correct. */
		DBG_FTL(("FPGA download: '%s' file too small (%ld < %ld)",
			 FileName, *Length, code + ((cnt + 7) / 8)))
		xdiFreeFile(File);
		return (NULL);
	}
	/* scan the date string for the first 4-digit year >= 2000 */
	i = 0;
	do
	{
		while ((fpgaDate[i] != '\0')
		       && ((fpgaDate[i] < '0') || (fpgaDate[i] > '9')))
		{
			i++;
		}
		year = 0;
		while ((fpgaDate[i] >= '0') && (fpgaDate[i] <= '9'))
			year = year * 10 + (fpgaDate[i++] - '0');
	} while ((year < 2000) && (fpgaDate[i] != '\0'));
	switch (IoAdapter->cardType) {
	case CARDTYPE_DIVASRV_B_2F_PCI:
		break;
	default:
		if (year >= 2001) {
			IoAdapter->fpga_features |= PCINIT_FPGA_PLX_ACCESS_SUPPORTED;
		}
	}
	DBG_LOG(("FPGA[%s] file %s (%s %s) len %d",
		 fpgaType, fpgaFile, fpgaDate, fpgaTime, cnt))
	return (File);
}
/******************************************************************************/
#define FPGA_PROG 0x0001	/* PROG enable low */
#define FPGA_BUSY 0x0002	/* BUSY high, DONE low */
#define FPGA_CS 0x000C		/* Enable I/O pins */
#define FPGA_CCLK 0x0100
#define FPGA_DOUT 0x0400
#define FPGA_DIN FPGA_DOUT	/* bidirectional I/O */

/* Bit-bang an FPGA bitstream into the card through the PROM register:
   select the bitstream file for the card variant, pulse PROG low to
   clear the FPGA, then clock every data bit out on CCLK and verify the
   DONE indication.  Returns 1 on success, 0 on any failure. */
int qBri_FPGA_download(PISDN_ADAPTER IoAdapter) {
	int bit;
	byte *File;
	dword code, FileLength;
	word volatile __iomem *addr = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
	word val, baseval = FPGA_CS | FPGA_PROG;
	if (DIVA_4BRI_REVISION(IoAdapter))
	{
		/* revision-2 family: pick the per-card bitstream */
		char *name;
		switch (IoAdapter->cardType) {
		case CARDTYPE_DIVASRV_B_2F_PCI:
			name = "dsbri2f.bit";
			break;
		case CARDTYPE_DIVASRV_B_2M_V2_PCI:
		case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
			name = "dsbri2m.bit";
			break;
		default:
			name = "ds4bri2.bit";
		}
		File = qBri_check_FPGAsrc(IoAdapter, name,
					  &FileLength, &code);
	}
	else
	{
		File = qBri_check_FPGAsrc(IoAdapter, "ds4bri.bit",
					  &FileLength, &code);
	}
	if (!File) {
		DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
		return (0);
	}
/*
 * prepare download, pulse PROGRAM pin down.
 */
	WRITE_WORD(addr, baseval & ~FPGA_PROG);	/* PROGRAM low pulse */
	WRITE_WORD(addr, baseval);	/* release */
	diva_os_wait(50);	/* wait until FPGA finished internal memory clear */
/*
 * check done pin, must be low
 */
	if (READ_WORD(addr) & FPGA_BUSY)
	{
		DBG_FTL(("FPGA download: acknowledge for FPGA memory clear missing"))
		xdiFreeFile(File);
		DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
		return (0);
	}
/*
 * put data onto the FPGA
 */
	while (code < FileLength)
	{
		/* shift so the MSB lines up with the FPGA_DOUT bit */
		val = ((word)File[code++]) << 3;
		for (bit = 8; bit-- > 0; val <<= 1)	/* put byte onto FPGA */
		{
			baseval &= ~FPGA_DOUT;	/* clr data bit */
			baseval |= (val & FPGA_DOUT);	/* copy data bit */
			WRITE_WORD(addr, baseval);
			WRITE_WORD(addr, baseval | FPGA_CCLK);	/* set CCLK hi */
			WRITE_WORD(addr, baseval | FPGA_CCLK);	/* set CCLK hi (held one extra write) */
			WRITE_WORD(addr, baseval);	/* set CCLK lo */
		}
	}
	xdiFreeFile(File);
	diva_os_wait(100);
	/* DONE must now be signalled in the BUSY bit */
	val = READ_WORD(addr);
	DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
	if (!(val & FPGA_BUSY))
	{
		DBG_FTL(("FPGA download: chip remains in busy state (0x%04x)", val))
		return (0);
	}
	return (1);
}
/* Firmware for QBRI adapters is loaded elsewhere (user-mode helper),
   so this adapter "load" hook is a stub that reports no work done. */
static int load_qBri_hardware(PISDN_ADAPTER IoAdapter) {
	return 0;
}
/* --------------------------------------------------------------------------
   Card ISR
   Shared interrupt handler for all sub-adapters on one card.  Returns
   1 if the interrupt was ours, 0 to pass it on.  The PLX INTCSR is
   checked first so foreign interrupts exit cheaply; the board-level
   request is then acknowledged and the soft ISR of every initialized
   sub-adapter whose interrupt test fires is scheduled.
   -------------------------------------------------------------------------- */
static int qBri_ISR(struct _ISDN_ADAPTER *IoAdapter) {
	dword volatile __iomem *qBriIrq;
	PADAPTER_LIST_ENTRY QuadroList = IoAdapter->QuadroList;
	word i;
	int serviced = 0;
	byte __iomem *p;
	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
	/* INTCSR bit 7: local interrupt active */
	if (!(READ_BYTE(&p[PLX9054_INTCSR]) & 0x80)) {
		DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
		return (0);
	}
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
/*
 * clear interrupt line (reset Local Interrupt Test Register)
 */
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
	/* NOTE: IoAdapter is rebound to each sub-adapter inside the
	   loop; the condition re-reads tasks through the new binding */
	for (i = 0; i < IoAdapter->tasks; ++i)
	{
		IoAdapter = QuadroList->QuadroAdapter[i];
		if (IoAdapter && IoAdapter->Initialized
		    && IoAdapter->tst_irq(&IoAdapter->a))
		{
			IoAdapter->IrqCount++;
			serviced = 1;
			diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
		}
	}
	return (serviced);
}
/* --------------------------------------------------------------------------
   Does disable the interrupt on the card
   Masks PCI interrupts at the PLX bridge and clears any pending local
   interrupt request.  Only controller 0 owns the shared registers.
   -------------------------------------------------------------------------- */
static void disable_qBri_interrupt(PISDN_ADAPTER IoAdapter) {
	dword volatile __iomem *qBriIrq;
	byte __iomem *p;
	if (IoAdapter->ControllerNumber > 0)
		return;
/*
 * clear interrupt line (reset Local Interrupt Test Register)
 */
	p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
	WRITE_BYTE(&p[PLX9054_INTCSR], 0x00);	/* disable PCI interrupts */
	DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
	p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
	qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
	WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
	DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
}
/* --------------------------------------------------------------------------
   Install Adapter Entry Points
   Wire up the memory-mapped shared-RAM access primitives and the
   QBRI-specific load/reset/stop/trap/ISR hooks that are common to all
   QBRI-family adapters.
   -------------------------------------------------------------------------- */
static void set_common_qBri_functions(PISDN_ADAPTER IoAdapter) {
	ADAPTER *a;
	a = &IoAdapter->a;
	/* shared-memory primitives */
	a->ram_in = mem_in;
	a->ram_inw = mem_inw;
	a->ram_in_buffer = mem_in_buffer;
	a->ram_look_ahead = mem_look_ahead;
	a->ram_out = mem_out;
	a->ram_outw = mem_outw;
	a->ram_out_buffer = mem_out_buffer;
	a->ram_inc = mem_inc;
	/* request/DPC and interrupt plumbing */
	IoAdapter->out = pr_out;
	IoAdapter->dpc = pr_dpc;
	IoAdapter->tst_irq = scom_test_int;
	IoAdapter->clr_irq = scom_clear_int;
	IoAdapter->pcm = (struct pc_maint *)MIPS_MAINT_OFFS;
	/* hardware lifecycle hooks */
	IoAdapter->load = load_qBri_hardware;
	IoAdapter->disIrq = disable_qBri_interrupt;
	IoAdapter->rstFnc = reset_qBri_hardware;
	IoAdapter->stop = stop_qBri_hardware;
	IoAdapter->trapFnc = qBri_cpu_trapped;
	IoAdapter->diva_isr_handler = qBri_ISR;
	/* back-pointer used by the generic ADAPTER layer */
	IoAdapter->a.io = (void *)IoAdapter;
}
/* Install entry points and memory layout for a first-generation
   4BRI adapter. */
static void set_qBri_functions(PISDN_ADAPTER IoAdapter) {
	if (IoAdapter->tasks == 0)
		IoAdapter->tasks = MQ_INSTANCE_COUNT;
	IoAdapter->MemorySize = MQ_MEMORY_SIZE;
	set_common_qBri_functions(IoAdapter);
	diva_os_set_qBri_functions(IoAdapter);
}
/* Install entry points and memory layout for a revision-2 adapter
   (single-task BRI 2 boards use a different memory size than the
   multi-task 4BRI 2). */
static void set_qBri2_functions(PISDN_ADAPTER IoAdapter) {
	if (IoAdapter->tasks == 0)
		IoAdapter->tasks = MQ_INSTANCE_COUNT;
	IoAdapter->MemorySize = (IoAdapter->tasks == 1)
		? BRI2_MEMORY_SIZE : MQ2_MEMORY_SIZE;
	set_common_qBri_functions(IoAdapter);
	diva_os_set_qBri2_functions(IoAdapter);
}
/******************************************************************************/
/* Initialize all four sub-adapters of a first-generation 4BRI card. */
void prepare_qBri_functions(PISDN_ADAPTER IoAdapter) {
	int i;

	for (i = 0; i < 4; i++)
		set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[i]);
}
/* Initialize the sub-adapters of a revision-2 card: only the first
   one for single-task BRI 2 boards, all four for a 4BRI 2. */
void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter) {
	int i, count;

	if (!IoAdapter->tasks)
		IoAdapter->tasks = MQ_INSTANCE_COUNT;

	count = (IoAdapter->tasks > 1) ? 4 : 1;
	for (i = 0; i < count; i++)
		set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[i]);
}
/* -------------------------------------------------------------------------- */
| gpl-2.0 |
project-zerus/mariadb | tests/deadlock_test.c | 13 | 5699 | /* Copyright (C) 2000-2001, 2003-2004, 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#include <mysql.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
typedef unsigned char uchar;
static void die(char* fmt, ...);
static void safe_query(MYSQL* mysql, char* query, int read_ok);
static void run_query_batch(int* order, int num_queries);
static void permute(int *order, int num_queries);
static void permute_aux(int *order, int num_queries, int* fixed);
static void dump_result(MYSQL* mysql, char* query);
/* Number of complete query orderings generated (legal or not). */
int count = 0;

/* One scripted query: the connection that issues it, whether the
   client must wait for the server reply (read_ok), a priority used to
   constrain legal orderings (0 = unconstrained; otherwise lower
   priorities must appear first), and whether its result set has to be
   drained afterwards (dump_result). */
struct query
{
  MYSQL* mysql;
  char* query;
  int read_ok;
  int pri;
  int dump_result;
};

/* Three independent connections used to provoke delayed-insert /
   table-lock deadlock scenarios. */
MYSQL lock, sel, del_ins;

struct query queries[] =
{
  {&del_ins, "insert delayed into foo values(1)", 1, 0, 0},
  {&del_ins, "insert delayed into foo values(1)", 1, 0, 0},
  {&lock, "lock tables foo write", 1, 1, 0},
  {&lock, "unlock tables", 1,2, 0},
  {&sel, "select * from foo", 0,0, 0},
  {&del_ins, "insert into foo values(4)", 0,3, 0},
  {0,0,0} /* terminator: mysql == NULL ends the list */
};
/* Print a printf-style error message to stderr and terminate the test
   with exit status 1. */
static void die(char* fmt, ...)
{
  va_list ap;

  fprintf(stderr, "ERROR: ");
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
  fprintf(stderr, "\n");
  exit(1);
}
/* Drive permute_aux() over every permutation of order[0..num_queries-1],
   running each ordering that satisfies the priority constraints.
   Allocates the "position already fixed" scratch map used by the
   recursion; trivially returns for fewer than two queries. */
static void permute(int *order, int num_queries)
{
  int *fixed;

  if (num_queries < 2)
    return;

  /* calloc() both zero-fills the map (replacing the old
     malloc+memset pair) and checks the count*size multiplication
     for overflow. */
  if (!(fixed = calloc(num_queries, sizeof(int))))
    die("malloc() failed");
  permute_aux(order, num_queries, fixed);
  free(fixed);
}
/* Return 1 when the ordering respects the priority constraints of the
   global queries[] table: for any two prioritized queries, the one
   with the smaller pri value must come first.  Queries with pri == 0
   are unconstrained.
   Fix: the original declared "static order_ok(...)" with an implicit
   int return type, which C99 removed and modern compilers reject. */
static int order_ok(int *order, int num_queries)
{
  int i, j, pri_i, pri_j;

  for (i = 0; i < num_queries; i++)
  {
    if ((pri_i = queries[order[i]].pri))
      for (j = i + 1; j < num_queries; j++)
      {
        pri_j = queries[order[j]].pri;
        if (pri_j && pri_i > pri_j)
          return 0;
      }
  }
  return 1;
}
/* Recursively generate every permutation of the not-yet-fixed
   positions of order[].  fixed[i] != 0 marks a position pinned by an
   outer recursion level.  Each level picks one free position i, then
   tries swapping it with every free position j before recursing.  When
   one or zero free positions remain the permutation is complete: it is
   counted and, if it passes order_ok(), executed. */
static void permute_aux(int *order, int num_queries, int* fixed)
{
  int *p,*p1,j,i,tmp, num_free = 0;
  p = fixed;
  for(i = 0; i < num_queries; i++, p++)
  {
    if(!*p)
    {
      num_free++;
      *p = 1;
      for(j = 0, p1 = fixed ;
          j < num_queries; j++,p1++)
      {
        if(!*p1)
        {
          /* swap, recurse with both positions pinned, then undo */
          tmp = order[i];
          order[i] = order[j];
          order[j] = tmp;
          *p1 = 1;
          permute_aux(order, num_queries, fixed);
          tmp = order[i];
          order[i] = order[j];
          order[j] = tmp;
          *p1 = 0;
        }
      }
      *p = 0;
    }
  }
  /*printf("num_free = %d\n", num_free); */
  if(num_free <= 1)
  {
    count++;
    if(order_ok(order, num_queries))
      run_query_batch(order, num_queries);
  }
}
/* Execute one ordering of the scripted queries: first empty the test
   table, then issue each query on its own connection (waiting for the
   reply where read_ok is set), and finally drain the result set of
   every query marked dump_result. */
static void run_query_batch(int* order, int num_queries)
{
  int i;
  struct query* q;
  int *save_order;

  safe_query(&lock, "delete from foo", 1);
  save_order = order;
  for(i = 0; i < num_queries; i++,order++)
  {
    q = queries + *order;
    printf("query='%s'\n", q->query);
    safe_query(q->mysql, q->query, q->read_ok);
  }
  /* second pass: collect pending result sets in the same order */
  order = save_order;
  for(i = 0; i < num_queries; i++,order++)
  {
    q = queries + *order;
    if(q->dump_result)
      dump_result(q->mysql, q->query);
  }
  printf("\n");
}
/* Read one reply packet from the server, die()-ing on a transport
   error, an empty packet, or a server error packet (first byte 255). */
static void safe_net_read(NET* net, char* query)
{
  int len;
  len = my_net_read(net);
  if(len == packet_error || !len)
    die("Error running query '%s'", query);
  if(net->read_pos[0] == 255)
    die("Error running query '%s'", query);
}
/* Send 'query' on the given connection at the wire level and, when
   read_ok is set, wait for and validate the server's reply.  Sending
   without reading lets the test interleave blocked statements.
   Fix: removed the unused local 'len'. */
static void safe_query(MYSQL* mysql, char* query, int read_ok)
{
  NET* net = &mysql->net;

  net_clear(net);
  if(net_write_command(net, (uchar)COM_QUERY, query, strlen(query)))
    die("Error running query '%s': %s", query, mysql_error(mysql));
  if(read_ok)
    safe_net_read(net, query);
}
/* Consume the pending reply for 'query' and discard its result set. */
static void dump_result(MYSQL* mysql, char* query)
{
  MYSQL_RES* result;

  safe_net_read(&mysql->net, query);
  result = mysql_store_result(mysql);
  if(result != NULL)
    mysql_free_result(result);
}
/* Count the entries of the global queries[] table (terminated by a
   NULL connection pointer), build the identity permutation 0..n-1 on
   the heap, store n through num_queries and return the array. */
static int* init_order(int* num_queries)
{
  struct query* q;
  int *order;
  int n, i;

  for (n = 0, q = queries; q->mysql; q++)
    n++;

  if (!(order = malloc(n * sizeof(*order))))
    die("malloc() failed");
  for (i = 0; i < n; i++)
    order[i] = i;

  *num_queries = n;
  return order;
}
/* Connect three sessions to the local "test" database and run every
   legal ordering of the scripted query batch, looking for server
   deadlocks or hangs.  Prints the number of orderings generated. */
int main()
{
  char* user = "root", *pass = "", *host = "localhost", *db = "test";
  int *order, num_queries;

  order = init_order(&num_queries);
  if(!mysql_init(&lock) || !mysql_init(&sel) || !mysql_init(&del_ins))
    die("error in mysql_init()");
  mysql_options(&lock, MYSQL_READ_DEFAULT_GROUP, "mysql");
  mysql_options(&sel, MYSQL_READ_DEFAULT_GROUP, "mysql");
  mysql_options(&del_ins, MYSQL_READ_DEFAULT_GROUP, "mysql");
  if(!mysql_real_connect(&lock, host, user, pass, db, 0,0,0 ) ||
     !mysql_real_connect(&sel, host, user, pass, db, 0,0,0 ) ||
     !mysql_real_connect(&del_ins, host, user, pass, db, 0,0,0 ))
    die("Error in mysql_real_connect(): %s", mysql_error(&lock));
  lock.reconnect= sel.reconnect= del_ins.reconnect= 1;

  permute(order, num_queries);
  printf("count = %d\n", count);

  mysql_close(&lock);
  mysql_close(&sel);
  mysql_close(&del_ins);
  free(order);
  /* was missing: make the success exit status explicit rather than
     relying on C99's main()-only fall-off-the-end special case */
  return 0;
}
| gpl-2.0 |
nmacs/lm3s-uclinux | lib/libtomcrypt/libtomcrypt-1.17/src/pk/asn1/der/sequence/der_encode_sequence_multi.c | 13 | 3541 | /* LibTomCrypt, modular cryptographic library -- Tom St Denis
*
* LibTomCrypt is a library that provides various cryptographic
* algorithms in a highly modular and flexible manner.
*
* The library is free for all purposes without any express
* guarantee it works.
*
* Tom St Denis, tomstdenis@gmail.com, http://libtom.org
*/
#include "tomcrypt.h"
#include <stdarg.h>
/**
@file der_encode_sequence_multi.c
ASN.1 DER, encode a SEQUENCE, Tom St Denis
*/
#ifdef LTC_DER
/* Return nonzero iff 'type' is an ASN.1 element type this encoder can
   handle.  Centralized so the counting pass and the fill-in pass of
   der_encode_sequence_multi() cannot drift out of sync (the original
   duplicated this switch in both loops). */
static int der_seq_type_supported(int type)
{
   switch (type) {
      case LTC_ASN1_BOOLEAN:
      case LTC_ASN1_INTEGER:
      case LTC_ASN1_SHORT_INTEGER:
      case LTC_ASN1_BIT_STRING:
      case LTC_ASN1_OCTET_STRING:
      case LTC_ASN1_NULL:
      case LTC_ASN1_OBJECT_IDENTIFIER:
      case LTC_ASN1_IA5_STRING:
      case LTC_ASN1_PRINTABLE_STRING:
      case LTC_ASN1_UTF8_STRING:
      case LTC_ASN1_UTCTIME:
      case LTC_ASN1_SEQUENCE:
      case LTC_ASN1_SET:
      case LTC_ASN1_SETOF:
         return 1;
      default:
         return 0;
   }
}

/**
  Encode a SEQUENCE type using a VA list
  @param out    [out] Destination for data
  @param outlen [in/out] Length of buffer and resulting length of output
  @remark <...> is of the form <type, size, data> (int, unsigned long, void*),
          terminated by a single LTC_ASN1_EOL type
  @return CRYPT_OK on success, CRYPT_NOP for an empty list,
          CRYPT_INVALID_ARG for an unsupported type, CRYPT_MEM on
          allocation failure, or the error from der_encode_sequence()
*/
int der_encode_sequence_multi(unsigned char *out, unsigned long *outlen, ...)
{
   int           err, type;
   unsigned long size, x;
   void          *data;
   va_list       args;
   ltc_asn1_list *list;

   LTC_ARGCHK(out    != NULL);
   LTC_ARGCHK(outlen != NULL);

   /* first pass: count the entries and validate every type */
   va_start(args, outlen);
   x = 0;
   for (;;) {
      type = va_arg(args, int);
      size = va_arg(args, unsigned long);
      data = va_arg(args, void*);
      (void)size; (void)data;   /* consumed only to advance the va_list */
      if (type == LTC_ASN1_EOL) {
         break;
      }
      if (!der_seq_type_supported(type)) {
         va_end(args);
         return CRYPT_INVALID_ARG;
      }
      ++x;
   }
   va_end(args);

   /* an empty sequence is a no-op */
   if (x == 0) {
      return CRYPT_NOP;
   }

   /* allocate structure for x elements */
   list = XCALLOC(sizeof(*list), x);
   if (list == NULL) {
      return CRYPT_MEM;
   }

   /* second pass: fill in the structure */
   va_start(args, outlen);
   x = 0;
   for (;;) {
      type = va_arg(args, int);
      size = va_arg(args, unsigned long);
      data = va_arg(args, void*);
      if (type == LTC_ASN1_EOL) {
         break;
      }
      if (!der_seq_type_supported(type)) {
         /* unreachable unless the argument list changed between the
            passes; keep the original defensive error path */
         va_end(args);
         err = CRYPT_INVALID_ARG;
         goto LBL_ERR;
      }
      list[x].type   = type;
      list[x].size   = size;
      list[x++].data = data;
   }
   va_end(args);

   err = der_encode_sequence(list, x, out, outlen);
LBL_ERR:
   XFREE(list);
   return err;
}
#endif
/* $Source: /cvs/libtom/libtomcrypt/src/pk/asn1/der/sequence/der_encode_sequence_multi.c,v $ */
/* $Revision: 1.12 $ */
/* $Date: 2006/12/28 01:27:24 $ */
| gpl-2.0 |
rhuitl/uClinux | lib/libgmp/tune/tuneup.c | 13 | 52818 | /* Create tuned thresholds for various algorithms.
Copyright 1999, 2000, 2001, 2002, 2003, 2005 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
/* Usage: tuneup [-t] [-t] [-p precision]
-t turns on some diagnostic traces, a second -t turns on more traces.
Notes:
The code here isn't a vision of loveliness, mainly because it's subject
to ongoing changes according to new things wanting to be tuned, and
practical requirements of systems tested.
Sometimes running the program twice produces slightly different results.
This is probably because there's so little separating algorithms near
their crossover, and on that basis it should make little or no difference
to the final speed of the relevant routines, but nothing has been done to
check that carefully.
Algorithm:
The thresholds are determined as follows. A crossover may not be a
single size but rather a range where it oscillates between method A or
method B faster. If the threshold is set making B used where A is faster
(or vice versa) that's bad. Badness is the percentage time lost and
total badness is the sum of this over all sizes measured. The threshold
is set to minimize total badness.
Suppose, as sizes increase, method B becomes faster than method A. The
effect of the rule is that, as you look at increasing sizes, isolated
points where B is faster are ignored, but when it's consistently faster,
or faster on balance, then the threshold is set there. The same result
is obtained thinking in the other direction of A becoming faster at
smaller sizes.
In practice the thresholds tend to be chosen to bring on the next
algorithm fairly quickly.
This rule is attractive because it's got a basis in reason and is fairly
easy to implement, but no work has been done to actually compare it in
absolute terms to other possibilities.
Implementation:
In a normal library build the thresholds are constants. To tune them
selected objects are recompiled with the thresholds as global variables
instead. #define TUNE_PROGRAM_BUILD does this, with help from code at
the end of gmp-impl.h, and rules in tune/Makefile.am.
MUL_KARATSUBA_THRESHOLD for example uses a recompiled mpn_mul_n. The
threshold is set to "size+1" to avoid karatsuba, or to "size" to use one
level, but recurse into the basecase.
MUL_TOOM3_THRESHOLD makes use of the tuned MUL_KARATSUBA_THRESHOLD value.
Other routines in turn will make use of both of those. Naturally the
dependants must be tuned first.
In a couple of cases, like DIVEXACT_1_THRESHOLD, there's no recompiling,
just a threshold based on comparing two routines (mpn_divrem_1 and
mpn_divexact_1), and no further use of the value determined.
Flags like USE_PREINV_MOD_1 or JACOBI_BASE_METHOD are even simpler, being
just comparisons between certain routines on representative data.
Shortcuts are applied when native (assembler) versions of routines exist.
For instance a native mpn_sqr_basecase is assumed to be always faster
than mpn_mul_basecase, with no measuring.
No attempt is made to tune within assembler routines, for instance
DIVREM_1_NORM_THRESHOLD. An assembler mpn_divrem_1 is expected to be
written and tuned all by hand. Assembler routines that might have hard
limits are recompiled though, to make them accept a bigger range of sizes
than normal, eg. mpn_sqr_basecase to compare against mpn_kara_sqr_n.
Limitations:
The FFTs aren't subject to the same badness rule as the other thresholds,
so each k is probably being brought on a touch early. This isn't likely
to make a difference, and the simpler probing means fewer tests.
*/
#define TUNE_PROGRAM_BUILD 1 /* for gmp-impl.h */
#include "config.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"
#include "tests.h"
#include "speed.h"
#if !HAVE_DECL_OPTARG
extern char *optarg;
extern int optind, opterr;
#endif
#define DEFAULT_MAX_SIZE 1000 /* limbs */
#if WANT_FFT
mp_size_t option_fft_max_size = 50000; /* limbs */
#else
mp_size_t option_fft_max_size = 0;
#endif
int option_trace = 0;
int option_fft_trace = 0;
struct speed_params s;
struct dat_t {
mp_size_t size;
double d;
} *dat = NULL;
int ndat = 0;
int allocdat = 0;
/* This is not defined if mpn_sqr_basecase doesn't declare a limit. In that
case use zero here, which for params.max_size means no limit. */
#ifndef TUNE_SQR_KARATSUBA_MAX
#define TUNE_SQR_KARATSUBA_MAX 0
#endif
mp_size_t mul_karatsuba_threshold = MP_SIZE_T_MAX;
mp_size_t mul_toom3_threshold = MUL_TOOM3_THRESHOLD_LIMIT;
mp_size_t mul_toom44_threshold = MUL_TOOM44_THRESHOLD_LIMIT;
mp_size_t mul_fft_threshold = MP_SIZE_T_MAX;
mp_size_t mul_fft_modf_threshold = MP_SIZE_T_MAX;
mp_size_t sqr_basecase_threshold = MP_SIZE_T_MAX;
mp_size_t sqr_karatsuba_threshold
= (TUNE_SQR_KARATSUBA_MAX == 0 ? MP_SIZE_T_MAX : TUNE_SQR_KARATSUBA_MAX);
mp_size_t sqr_toom3_threshold = SQR_TOOM3_THRESHOLD_LIMIT;
mp_size_t sqr_toom4_threshold = SQR_TOOM4_THRESHOLD_LIMIT;
mp_size_t sqr_fft_threshold = MP_SIZE_T_MAX;
mp_size_t sqr_fft_modf_threshold = MP_SIZE_T_MAX;
mp_size_t mullow_basecase_threshold = MP_SIZE_T_MAX;
mp_size_t mullow_dc_threshold = MP_SIZE_T_MAX;
mp_size_t mullow_mul_n_threshold = MP_SIZE_T_MAX;
mp_size_t div_sb_preinv_threshold = MP_SIZE_T_MAX;
mp_size_t div_dc_threshold = MP_SIZE_T_MAX;
mp_size_t powm_threshold = MP_SIZE_T_MAX;
mp_size_t matrix22_strassen_threshold = MP_SIZE_T_MAX;
mp_size_t hgcd_threshold = MP_SIZE_T_MAX;
mp_size_t gcd_accel_threshold = MP_SIZE_T_MAX;
mp_size_t gcd_dc_threshold = MP_SIZE_T_MAX;
mp_size_t gcdext_dc_threshold = MP_SIZE_T_MAX;
mp_size_t divrem_1_norm_threshold = MP_SIZE_T_MAX;
mp_size_t divrem_1_unnorm_threshold = MP_SIZE_T_MAX;
mp_size_t mod_1_norm_threshold = MP_SIZE_T_MAX;
mp_size_t mod_1_unnorm_threshold = MP_SIZE_T_MAX;
mp_size_t mod_1_1_threshold = MP_SIZE_T_MAX;
mp_size_t mod_1_2_threshold = MP_SIZE_T_MAX;
mp_size_t mod_1_3_threshold = MP_SIZE_T_MAX;
mp_size_t mod_1_4_threshold = MP_SIZE_T_MAX;
mp_size_t divrem_2_threshold = MP_SIZE_T_MAX;
mp_size_t get_str_dc_threshold = MP_SIZE_T_MAX;
mp_size_t get_str_precompute_threshold = MP_SIZE_T_MAX;
mp_size_t set_str_dc_threshold = MP_SIZE_T_MAX;
mp_size_t set_str_precompute_threshold = MP_SIZE_T_MAX;
mp_size_t fft_modf_sqr_threshold = MP_SIZE_T_MAX;
mp_size_t fft_modf_mul_threshold = MP_SIZE_T_MAX;
/* Control parameters for one threshold measurement performed by one().
   function is "method A" used below the candidate threshold, function2
   "method B" used at or above it (defaults to function when 0). */
struct param_t {
  const char *name;            /* the "#define NAME" printed */
  speed_function_t function;   /* method A */
  speed_function_t function2;  /* method B, defaults to function */
  double step_factor;    /* how much to step sizes (rounded down) */
  double function_fudge; /* multiplier for "function" speeds */
  int stop_since_change;       /* stop after this many unchanged estimates */
  double stop_factor;          /* stop when A is this factor slower than B */
  mp_size_t min_size;          /* first size measured */
  int min_is_always;           /* a threshold == min_size is reported as 0 */
  mp_size_t max_size;          /* last size measured */
  mp_size_t check_size;        /* if non-zero, sanity-check both methods here */
  mp_size_t size_extra;        /* added to s.size while measuring */
#define DATA_HIGH_LT_R 1
#define DATA_HIGH_GE_R 2
  int data_high;               /* constrain high limb relative to s.r */
  int noprint;                 /* suppress the "#define" printout */
};
/* These are normally undefined when false, which suits "#if" fine.
   But give them zero values so they can be used in plain C "if"s. */
#ifndef UDIV_PREINV_ALWAYS
#define UDIV_PREINV_ALWAYS 0
#endif
#ifndef HAVE_NATIVE_mpn_divexact_1
#define HAVE_NATIVE_mpn_divexact_1 0
#endif
#ifndef HAVE_NATIVE_mpn_divrem_1
#define HAVE_NATIVE_mpn_divrem_1 0
#endif
#ifndef HAVE_NATIVE_mpn_divrem_2
#define HAVE_NATIVE_mpn_divrem_2 0
#endif
#ifndef HAVE_NATIVE_mpn_mod_1
#define HAVE_NATIVE_mpn_mod_1 0
#endif
#ifndef HAVE_NATIVE_mpn_modexact_1_odd
#define HAVE_NATIVE_mpn_modexact_1_odd 0
#endif
#ifndef HAVE_NATIVE_mpn_preinv_divrem_1
#define HAVE_NATIVE_mpn_preinv_divrem_1 0
#endif
#ifndef HAVE_NATIVE_mpn_preinv_mod_1
#define HAVE_NATIVE_mpn_preinv_mod_1 0
#endif
#ifndef HAVE_NATIVE_mpn_sqr_basecase
#define HAVE_NATIVE_mpn_sqr_basecase 0
#endif

/* Maximum of three values. */
#define MAX3(a,b,c) MAX (MAX (a, b), c)
/* Return a random limb with its high (numb) bit forced on, i.e. a
   "normalized" value as required by various division routines. */
mp_limb_t
randlimb_norm (void)
{
  mp_limb_t  r;
  mpn_random (&r, 1);
  return r | GMP_NUMB_HIGHBIT;
}
#define GMP_NUMB_HALFMASK ((CNST_LIMB(1) << (GMP_NUMB_BITS/2)) - 1)

/* Return a non-zero random limb restricted to the low half of the numb
   bits, i.e. an "unnormalized" value. */
mp_limb_t
randlimb_half (void)
{
  mp_limb_t  r;
  mpn_random (&r, 1);
  r &= GMP_NUMB_HALFMASK;
  if (r == 0)
    r = 1;           /* must be non-zero, it's used as a divisor */
  return r;
}
/* Append one (size, badness) measurement to the global dat[] array,
   growing the array in ALLOCDAT_STEP chunks when it's full. */
void
add_dat (mp_size_t size, double d)
{
#define ALLOCDAT_STEP 500

  ASSERT_ALWAYS (ndat <= allocdat);

  if (ndat == allocdat)
    {
      /* full, grow by one step */
      dat = (struct dat_t *) __gmp_allocate_or_reallocate
        (dat,
         allocdat * sizeof (dat[0]),
         (allocdat + ALLOCDAT_STEP) * sizeof (dat[0]));
      allocdat += ALLOCDAT_STEP;
    }

  dat[ndat].size = size;
  dat[ndat].d = d;
  ndat++;
}
/* Return the index into dat[] giving the best threshold position found
   so far.  "Badness" x for a candidate threshold is the summed relative
   slowdown of the sizes on the wrong side of it; the candidate with
   minimum x wins.  final non-zero means this is the concluding analysis,
   enabling the trace printout. */
mp_size_t
analyze_dat (int final)
{
  double  x, min_x;
  int  j, min_j;

  /* If the threshold is set at dat[0].size, any positive values are bad. */
  x = 0.0;
  for (j = 0; j < ndat; j++)
    if (dat[j].d > 0.0)
      x += dat[j].d;

  if (option_trace >= 2 && final)
    {
      printf ("\n");
      printf ("x is the sum of the badness from setting thresh at given size\n");
      printf (" (minimum x is sought)\n");
      /* BUG FIX: the loop above exits with j == ndat, so "dat[j].size"
         read one element past the end of the array.  The candidate this
         first x corresponds to is dat[0].size. */
      printf ("size=%ld first x=%.4f\n", (long) dat[0].size, x);
    }

  min_x = x;
  min_j = 0;

  /* When stepping to the next dat[j].size, positive values are no longer
     bad (so subtracted), negative values become bad (so add the absolute
     value, meaning subtract). */
  for (j = 0; j < ndat; x -= dat[j].d, j++)
    {
      if (option_trace >= 2 && final)
        printf ("size=%ld x=%.4f\n", (long) dat[j].size, x);

      if (x < min_x)
        {
          min_x = x;
          min_j = j;
        }
    }

  return min_j;
}
/* Measuring for recompiled mpn/generic/divrem_1.c and mpn/generic/mod_1.c */
/* Prototypes for the specially recompiled "_tune" variants measured by
   the speed wrappers below. */
mp_limb_t mpn_divrem_1_tune
  __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
mp_limb_t mpn_mod_1_tune
  __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t));
/* Speed-measure the recompiled mpn_mod_1_tune; the whole body expands
   from the SPEED_ROUTINE_MPN_MOD_1 measuring macro. */
double
speed_mpn_mod_1_tune (struct speed_params *s)
{
  SPEED_ROUTINE_MPN_MOD_1 (mpn_mod_1_tune);
}
/* Speed-measure the recompiled mpn_divrem_1_tune; the whole body expands
   from the SPEED_ROUTINE_MPN_DIVREM_1 measuring macro. */
double
speed_mpn_divrem_1_tune (struct speed_params *s)
{
  SPEED_ROUTINE_MPN_DIVREM_1 (mpn_divrem_1_tune);
}
/* Time "fun" at the current s->size, with fresh random operands.
   param->size_extra is temporarily added to s->size, and param->data_high
   constrains the high limbs of the operands relative to s->r.  Returns
   the measured time, or -1.0 from speed_measure on failure. */
double
tuneup_measure (speed_function_t fun,
                const struct param_t *param,
                struct speed_params *s)
{
  static struct param_t  dummy;
  double  elapsed;
  TMP_DECL;

  if (param == NULL)
    param = &dummy;           /* all-defaults behaviour */

  s->size += param->size_extra;

  TMP_MARK;
  SPEED_TMP_ALLOC_LIMBS (s->xp, s->size, 0);
  SPEED_TMP_ALLOC_LIMBS (s->yp, s->size, 0);

  mpn_random (s->xp, s->size);
  mpn_random (s->yp, s->size);

  if (param->data_high == DATA_HIGH_LT_R)
    {
      /* force high limbs below the divisor s->r */
      s->xp[s->size-1] %= s->r;
      s->yp[s->size-1] %= s->r;
    }
  else if (param->data_high == DATA_HIGH_GE_R)
    {
      /* force high limbs at or above s->r */
      s->xp[s->size-1] |= s->r;
      s->yp[s->size-1] |= s->r;
    }

  elapsed = speed_measure (fun, s);

  s->size -= param->size_extra;
  TMP_FREE;
  return elapsed;
}
#define PRINT_WIDTH 28

/* Print the "#define NAME " part of a threshold line, padded to
   PRINT_WIDTH.  Under tracing, terminate the line with "..." so the
   trace output that follows starts cleanly. */
void
print_define_start (const char *name)
{
  printf ("#define %-*s ", PRINT_WIDTH, name);
  if (option_trace)
    {
      printf ("...\n");
    }
}
/* Print the value part of a threshold line, with an optional trailing
   remark comment.  Under tracing, re-print the "#define NAME" prefix
   since print_define_start already ended its line. */
void
print_define_end_remark (const char *name, mp_size_t value, const char *remark)
{
  if (option_trace)
    {
      printf ("#define %-*s ", PRINT_WIDTH, name);
    }

  if (value == MP_SIZE_T_MAX)
    {
      printf ("MP_SIZE_T_MAX");
    }
  else
    {
      printf ("%5ld", (long) value);
    }

  if (remark)
    printf (" /* %s */", remark);
  printf ("\n");
}
/* Print the value part of a threshold line, annotating the two special
   values: MP_SIZE_T_MAX as "never" and 0 as "always". */
void
print_define_end (const char *name, mp_size_t value)
{
  const char *remark = NULL;

  if (value == MP_SIZE_T_MAX)
    remark = "never";
  else if (value == 0)
    remark = "always";

  print_define_end_remark (name, value, remark);
}
/* Print a complete "#define NAME value" line, with automatic
   "never"/"always" remarks for the special values. */
void
print_define (const char *name, mp_size_t value)
{
  print_define_start (name);
  print_define_end (name, value);
}
/* Print a complete "#define NAME value" line with an explicit remark
   comment (or none when remark is NULL). */
void
print_define_remark (const char *name, mp_size_t value, const char *remark)
{
  print_define_start (name);
  print_define_end_remark (name, value, remark);
}
/* Measure the crossover between param->function (method A, used below the
   threshold) and param->function2 (method B, used at or above it), and
   store the chosen threshold through *threshold.  The measurement data
   goes into the global dat[]/ndat arrays via add_dat(), and the best
   position is picked by analyze_dat().  Unless param->noprint, the
   resulting "#define" line is printed. */
void
one (mp_size_t *threshold, struct param_t *param)
{
  int since_positive, since_thresh_change;
  int thresh_idx, new_thresh_idx;

/* Fill in default values for any zero fields of *param. */
#define DEFAULT(x,n) do { if (! (x)) (x) = (n); } while (0)

  DEFAULT (param->function_fudge, 1.0);
  DEFAULT (param->function2, param->function);
  DEFAULT (param->step_factor, 0.01); /* small steps by default */
  DEFAULT (param->stop_since_change, 80);
  DEFAULT (param->stop_factor, 1.2);
  DEFAULT (param->min_size, 10);
  DEFAULT (param->max_size, DEFAULT_MAX_SIZE);

  /* Optional sanity check: at check_size, method B must already be
     clearly faster, otherwise the threshold is declared "never". */
  if (param->check_size != 0)
    {
      double t1, t2;
      s.size = param->check_size;

      /* *threshold above s.size selects method A in the measured code */
      *threshold = s.size+1;
      t1 = tuneup_measure (param->function, param, &s);

      /* *threshold at s.size selects method B */
      *threshold = s.size;
      t2 = tuneup_measure (param->function2, param, &s);
      if (t1 == -1.0 || t2 == -1.0)
        {
          printf ("Oops, can't run both functions at size %ld\n",
                  (long) s.size);
          abort ();
        }
      t1 *= param->function_fudge;

      /* ask that t2 is at least 4% below t1 */
      if (t1 < t2*1.04)
        {
          if (option_trace)
            printf ("function2 never enough faster: t1=%.9f t2=%.9f\n", t1, t2);
          *threshold = MP_SIZE_T_MAX;
          if (! param->noprint)
            print_define (param->name, *threshold);
          return;
        }

      if (option_trace >= 2)
        printf ("function2 enough faster at size=%ld: t1=%.9f t2=%.9f\n",
                (long) s.size, t1, t2);
    }

  if (! param->noprint || option_trace)
    print_define_start (param->name);

  ndat = 0;
  since_positive = 0;
  since_thresh_change = 0;
  thresh_idx = 0;

  if (option_trace >= 2)
    {
      printf (" algorithm-A algorithm-B ratio possible\n");
      printf (" (seconds) (seconds) diff thresh\n");
    }

  /* Step sizes geometrically by step_factor (at least 1 limb). */
  for (s.size = param->min_size;
       s.size < param->max_size;
       s.size += MAX ((mp_size_t) floor (s.size * param->step_factor), 1))
    {
      double ti, tiplus1, d;

      /* If there's a size limit and it's reached then it should still
         be sensible to analyze the data since we want the threshold put
         either at or near the limit. */
      if (s.size >= param->max_size)
        {
          if (option_trace)
            printf ("Reached maximum size (%ld) without otherwise stopping\n",
                    (long) param->max_size);
          break;
        }

      /*
        FIXME: check minimum size requirements are met, possibly by just
        checking for the -1 returns from the speed functions.
      */

      /* using method A at this size */
      *threshold = s.size+1;
      ti = tuneup_measure (param->function, param, &s);
      if (ti == -1.0)
        abort ();
      ti *= param->function_fudge;

      /* using method B at this size */
      *threshold = s.size;
      tiplus1 = tuneup_measure (param->function2, param, &s);
      if (tiplus1 == -1.0)
        abort ();

      /* Calculate the fraction by which the one or the other routine is
         slower. */
      if (tiplus1 >= ti)
        d = (tiplus1 - ti) / tiplus1; /* negative */
      else
        d = (tiplus1 - ti) / ti; /* positive */

      add_dat (s.size, d);
      new_thresh_idx = analyze_dat (0);

      if (option_trace >= 2)
        printf ("size=%ld %.9f %.9f % .4f %c %ld\n",
                (long) s.size, ti, tiplus1, d,
                ti > tiplus1 ? '#' : ' ',
                (long) dat[new_thresh_idx].size);

      /* Stop if the last time method i was faster was more than a
         certain number of measurements ago. */
#define STOP_SINCE_POSITIVE 200
      if (d >= 0)
        since_positive = 0;
      else
        if (++since_positive > STOP_SINCE_POSITIVE)
          {
            if (option_trace >= 1)
              printf ("stopped due to since_positive (%d)\n",
                      STOP_SINCE_POSITIVE);
            break;
          }

      /* Stop if method A has become slower by a certain factor. */
      if (ti >= tiplus1 * param->stop_factor)
        {
          if (option_trace >= 1)
            printf ("stopped due to ti >= tiplus1 * factor (%.1f)\n",
                    param->stop_factor);
          break;
        }

      /* Stop if the threshold implied hasn't changed in a certain
         number of measurements. (It's this condition that usually
         stops the loop.) */
      if (thresh_idx != new_thresh_idx)
        since_thresh_change = 0, thresh_idx = new_thresh_idx;
      else
        if (++since_thresh_change > param->stop_since_change)
          {
            if (option_trace >= 1)
              printf ("stopped due to since_thresh_change (%d)\n",
                      param->stop_since_change);
            break;
          }

      /* Stop if the threshold implied is more than a certain number of
         measurements ago. */
#define STOP_SINCE_AFTER 500
      if (ndat - thresh_idx > STOP_SINCE_AFTER)
        {
          if (option_trace >= 1)
            printf ("stopped due to ndat - thresh_idx > amount (%d)\n",
                    STOP_SINCE_AFTER);
          break;
        }

      /* Stop when the size limit is reached before the end of the
         crossover, but only show this as an error for >= the default max
         size. FIXME: Maybe should make it a param choice whether this is
         an error. */
      if (s.size >= param->max_size && param->max_size >= DEFAULT_MAX_SIZE)
        {
          fprintf (stderr, "%s\n", param->name);
          fprintf (stderr, "sizes %ld to %ld total %d measurements\n",
                   (long) dat[0].size, (long) dat[ndat-1].size, ndat);
          fprintf (stderr, " max size reached before end of crossover\n");
          break;
        }
    }

  if (option_trace >= 1)
    printf ("sizes %ld to %ld total %d measurements\n",
            (long) dat[0].size, (long) dat[ndat-1].size, ndat);

  /* Final analysis over all accumulated data picks the threshold. */
  *threshold = dat[analyze_dat (1)].size;

  /* A threshold right at min_size means "always" when requested. */
  if (param->min_is_always)
    {
      if (*threshold == param->min_size)
        *threshold = 0;
    }

  if (! param->noprint || option_trace)
    print_define_end (param->name, *threshold);
}
/* Special probing for the fft thresholds. The size restrictions on the
   FFTs mean the graph of time vs size has a step effect. See this for
   example using
   ./speed -s 4096-16384 -t 128 -P foo mpn_mul_fft.8 mpn_mul_fft.9
   gnuplot foo.gnuplot
   The current approach is to compare routines at the midpoint of relevant
   steps. Arguably a more sophisticated system of threshold data is wanted
   if this step effect remains. */
struct fft_param_t {
  const char *table_name;           /* name of the k-step table define */
  const char *threshold_name;       /* full-product threshold define */
  const char *modf_threshold_name;  /* mod-2^N+1 threshold define */
  mp_size_t *p_threshold;           /* where to store the full threshold */
  mp_size_t *p_modf_threshold;      /* where to store the modf threshold */
  mp_size_t first_size;             /* smallest size probed */
  mp_size_t max_size;               /* give up beyond this size */
  speed_function_t function;        /* the fft being probed */
  speed_function_t mul_function;    /* the plain multiply compared against */
  mp_size_t sqr;                    /* index into mpn_fft_table (mul vs sqr
                                       table — confirm against its decl) */
};
/* mpn_mul_fft requires pl a multiple of 2^k limbs, but with
   N=pl*BIT_PER_MP_LIMB it internally also pads out so N/2^k is a multiple
   of 2^(k-1) bits.  Return the resulting step in limbs between usable
   sizes for parameter k, aborting if k is too big to represent. */
mp_size_t
fft_step_size (int k)
{
  mp_size_t  step;

  step = MAX ((mp_size_t) 1 << (k-1), BITS_PER_MP_LIMB) / BITS_PER_MP_LIMB;
  step *= (mp_size_t) 1 << k;

  /* overflow would wrap to zero or negative */
  if (step <= 0)
    {
      printf ("Can't handle k=%d\n", k);
      abort ();
    }

  return step;
}
/* Return the next size >= pl+? usable with FFT parameter k: pl rounded up
   to a non-zero multiple of fft_step_size(k).  pl already a non-zero
   multiple is returned unchanged; pl==0 advances to one full step. */
mp_size_t
fft_next_size (mp_size_t pl, int k)
{
  mp_size_t  step = fft_step_size (k);
  mp_size_t  mask = step - 1;     /* step is a power of 2 times a power of 2 */

  if (pl == 0 || (pl & mask) != 0)
    pl = (pl | mask) + 1;         /* round up to the next multiple */

  return pl;
}
/* Probe the FFT thresholds described by *p.  Phase 1 fills in the
   mpn_fft_table[] k-step crossover table and prints it as a "#define".
   Phase 2 finds where the FFT first beats the plain multiplication
   (p->mul_function), first for the mod-2^N+1 product then for a full
   product, printing each threshold define as it's found. */
void
fft (struct fft_param_t *p)
{
  mp_size_t size;
  int i, k;

  for (i = 0; i < numberof (mpn_fft_table[p->sqr]); i++)
    mpn_fft_table[p->sqr][i] = MP_SIZE_T_MAX;

  *p->p_threshold = MP_SIZE_T_MAX;
  *p->p_modf_threshold = MP_SIZE_T_MAX;

  option_trace = MAX (option_trace, option_fft_trace);

  printf ("#define %s {", p->table_name);
  if (option_trace >= 2)
    printf ("\n");

  k = FFT_FIRST_K;
  size = p->first_size;
  for (;;)
    {
      double tk, tk1;

      size = fft_next_size (size+1, k+1);

      if (size >= p->max_size)
        break;
      if (k >= FFT_FIRST_K + numberof (mpn_fft_table[p->sqr]))
        break;

      /* compare k to k+1 in the middle of the current k+1 step */
      s.size = size + fft_step_size (k+1) / 2;
      s.r = k;
      tk = tuneup_measure (p->function, NULL, &s);
      if (tk == -1.0)
        abort ();

      s.r = k+1;
      tk1 = tuneup_measure (p->function, NULL, &s);
      if (tk1 == -1.0)
        abort ();

      if (option_trace >= 2)
        printf ("at %ld size=%ld k=%d %.9f k=%d %.9f\n",
                (long) size, (long) s.size, k, tk, k+1, tk1);

      /* declare the k+1 threshold as soon as it's faster at its midpoint */
      if (tk1 < tk)
        {
          mpn_fft_table[p->sqr][k-FFT_FIRST_K] = s.size;
          printf (" %ld,", (long) s.size);
          if (option_trace >= 2) printf ("\n");
          k++;
        }
    }

  /* terminate the table */
  mpn_fft_table[p->sqr][k-FFT_FIRST_K] = 0;
  printf (" 0 }\n");

  size = p->first_size;

  /* Declare an FFT faster than a plain toom4 etc multiplication found as
     soon as one faster measurement obtained. A multiplication in the
     middle of the FFT step is tested. */
  for (;;)
    {
      /* still hunting the modf threshold? */
      int modf = (*p->p_modf_threshold == MP_SIZE_T_MAX);
      double tk, tm;

      /* k=7 should be the first FFT which can beat toom4 on a full
         multiply, so jump to that threshold and save some probing after the
         modf threshold is found. */
      if (!modf && size < mpn_fft_table[p->sqr][2])
        {
          size = mpn_fft_table[p->sqr][2];
          if (option_trace >= 2)
            printf ("jump to size=%ld\n", (long) size);
        }

      size = fft_next_size (size+1, mpn_fft_best_k (size, p->sqr));
      k = mpn_fft_best_k (size, p->sqr);

      if (size >= p->max_size)
        break;

      s.size = size + fft_step_size (k) / 2;
      s.r = k;
      tk = tuneup_measure (p->function, NULL, &s);
      if (tk == -1.0)
        abort ();

      /* a full product of size n is compared against a modf of size 2n */
      if (!modf) s.size /= 2;
      tm = tuneup_measure (p->mul_function, NULL, &s);
      if (tm == -1.0)
        abort ();

      if (option_trace >= 2)
        printf ("at %ld size=%ld k=%d %.9f size=%ld %s mul %.9f\n",
                (long) size,
                (long) size + fft_step_size (k) / 2, k, tk,
                (long) s.size, modf ? "modf" : "full", tm);

      if (tk < tm)
        {
          if (modf)
            {
              *p->p_modf_threshold = s.size;
              print_define (p->modf_threshold_name, *p->p_modf_threshold);
            }
          else
            {
              *p->p_threshold = s.size;
              print_define (p->threshold_name, *p->p_threshold);
              break;
            }
        }
    }
}
/* Start karatsuba from 4, since the Cray t90 ieee code is much faster at 2,
   giving wrong results. */
/* Tune the mul_n chain of thresholds (karatsuba, toom3, toom44); each
   later threshold starts its probing where the previous one ended. */
void
tune_mul (void)
{
  static struct param_t  param;

  param.function = speed_mpn_mul_n;

  param.name = "MUL_KARATSUBA_THRESHOLD";
  param.min_size = MAX (4, MPN_KARA_MUL_N_MINSIZE);
  param.max_size = MUL_KARATSUBA_THRESHOLD_LIMIT-1;
  one (&mul_karatsuba_threshold, &param);

  param.name = "MUL_TOOM3_THRESHOLD";
  param.min_size = MAX (mul_karatsuba_threshold, MPN_TOOM3_MUL_N_MINSIZE);
  param.max_size = MUL_TOOM3_THRESHOLD_LIMIT-1;
  one (&mul_toom3_threshold, &param);

  param.name = "MUL_TOOM44_THRESHOLD";
  param.min_size = MAX (mul_toom3_threshold, MPN_TOOM44_MUL_N_MINSIZE);
  param.max_size = MUL_TOOM44_THRESHOLD_LIMIT-1;
  one (&mul_toom44_threshold, &param);

  /* disabled until tuned */
  MUL_FFT_THRESHOLD = MP_SIZE_T_MAX;
}
/* This was written by the tuneup challenged tege. Kevin, please delete
   this comment when you've reviewed/rewritten this. :-) */
/* Tune the low-half product thresholds: basecase -> divide and conquer
   -> plain mul_n. */
void
tune_mullow (void)
{
  static struct param_t  param;

  param.function = speed_mpn_mullow_n;

  param.name = "MULLOW_BASECASE_THRESHOLD";
  param.min_size = 3;
  param.min_is_always = 1;
  param.max_size = MULLOW_BASECASE_THRESHOLD_LIMIT-1;
  one (&mullow_basecase_threshold, &param);

  param.min_is_always = 0; /* ??? */

  param.name = "MULLOW_DC_THRESHOLD";
  param.min_size = mul_karatsuba_threshold;
  param.max_size = 1000;
  one (&mullow_dc_threshold, &param);

  param.name = "MULLOW_MUL_N_THRESHOLD";
  param.min_size = mullow_dc_threshold;
  param.max_size = 2000;
  one (&mullow_mul_n_threshold, &param);
}
/* Start the basecase from 3, since 1 is a special case, and if mul_basecase
   is faster only at size==2 then we don't want to bother with extra code
   just for that. Start karatsuba from 4 same as MUL above. */
/* Tune the squaring chain: sqr_basecase vs mul_basecase, karatsuba,
   toom3, toom4.  The basecase/karatsuba pair interact, see the comment
   in the middle. */
void
tune_sqr (void)
{
  /* disabled until tuned */
  SQR_FFT_THRESHOLD = MP_SIZE_T_MAX;

  if (HAVE_NATIVE_mpn_sqr_basecase)
    {
      print_define_remark ("SQR_BASECASE_THRESHOLD", 0, "always (native)");
      sqr_basecase_threshold = 0;
    }
  else
    {
      static struct param_t  param;
      param.name = "SQR_BASECASE_THRESHOLD";
      param.function = speed_mpn_sqr_n;
      param.min_size = 3;
      param.min_is_always = 1;
      param.max_size = TUNE_SQR_KARATSUBA_MAX;
      param.noprint = 1;
      one (&sqr_basecase_threshold, &param);
    }

  {
    static struct param_t  param;
    param.name = "SQR_KARATSUBA_THRESHOLD";
    param.function = speed_mpn_sqr_n;
    param.min_size = MAX (4, MPN_KARA_SQR_N_MINSIZE);
    param.max_size = TUNE_SQR_KARATSUBA_MAX;
    param.noprint = 1;
    one (&sqr_karatsuba_threshold, &param);

    if (! HAVE_NATIVE_mpn_sqr_basecase
        && sqr_karatsuba_threshold < sqr_basecase_threshold)
      {
        /* Karatsuba becomes faster than mul_basecase before
           sqr_basecase does. Arrange for the expression
           "BELOW_THRESHOLD (un, SQR_KARATSUBA_THRESHOLD))" which
           selects mpn_sqr_basecase in mpn_sqr_n to be false, by setting
           SQR_KARATSUBA_THRESHOLD to zero, making
           SQR_BASECASE_THRESHOLD the karatsuba threshold. */
        sqr_basecase_threshold = SQR_KARATSUBA_THRESHOLD;
        SQR_KARATSUBA_THRESHOLD = 0;

        print_define_remark ("SQR_BASECASE_THRESHOLD", sqr_basecase_threshold,
                             "karatsuba");
        print_define_remark ("SQR_KARATSUBA_THRESHOLD", SQR_KARATSUBA_THRESHOLD,
                             "never sqr_basecase");
      }
    else
      {
        if (! HAVE_NATIVE_mpn_sqr_basecase)
          print_define ("SQR_BASECASE_THRESHOLD", sqr_basecase_threshold);
        print_define ("SQR_KARATSUBA_THRESHOLD", SQR_KARATSUBA_THRESHOLD);
      }
  }

  {
    static struct param_t  param;
    mp_size_t  toom3_start = MAX (sqr_karatsuba_threshold, sqr_basecase_threshold);

    param.function = speed_mpn_sqr_n;

    param.name = "SQR_TOOM3_THRESHOLD";
    param.min_size = MAX (toom3_start, MPN_TOOM3_SQR_N_MINSIZE);
    param.max_size = SQR_TOOM3_THRESHOLD_LIMIT-1;
    one (&sqr_toom3_threshold, &param);

    param.name = "SQR_TOOM4_THRESHOLD";
    param.min_size = MAX (sqr_toom3_threshold, MPN_TOOM4_SQR_N_MINSIZE);
    param.max_size = SQR_TOOM4_THRESHOLD_LIMIT-1;
    one (&sqr_toom4_threshold, &param);
  }
}
/* Tune DIV_SB_PREINV_THRESHOLD: where the pre-inverted schoolbook
   division beats the plain one.  Forced to "never" with nails and
   "always" when udiv_qrnnd_preinv is the only division available. */
void
tune_sb_preinv (void)
{
  static struct param_t  param;

  if (GMP_NAIL_BITS != 0)
    {
      DIV_SB_PREINV_THRESHOLD = MP_SIZE_T_MAX;
      print_define_remark ("DIV_SB_PREINV_THRESHOLD", MP_SIZE_T_MAX,
                           "no preinv with nails");
      return;
    }

  if (UDIV_PREINV_ALWAYS)
    {
      print_define_remark ("DIV_SB_PREINV_THRESHOLD", 0L, "preinv always");
      return;
    }

  param.check_size = 256;
  param.min_size = 3;
  param.min_is_always = 1;
  param.size_extra = 3;
  param.stop_factor = 2.0;
  param.name = "DIV_SB_PREINV_THRESHOLD";
  param.function = speed_mpn_sb_divrem_m3;
  one (&div_sb_preinv_threshold, &param);
}
/* Tune DIV_DC_THRESHOLD: where divide-and-conquer division beats the
   schoolbook method. */
void
tune_dc (void)
{
  static struct param_t  param;

  param.name = "DIV_DC_THRESHOLD";
  param.function = speed_mpn_dc_tdiv_qr;
  param.step_factor = 0.02;
  one (&div_dc_threshold, &param);
}
/* This is an indirect determination, based on a comparison between redc and
   mpz_mod. A fudge factor of 1.04 is applied to redc, to represent
   additional overheads it gets in mpz_powm.
   stop_factor is 1.1 to hopefully help cray vector systems, where otherwise
   currently it hits the 1000 limb limit with only a factor of about 1.18
   (threshold should be around 650). */
void
tune_powm (void)
{
  static struct param_t  param;

  param.name = "POWM_THRESHOLD";
  param.function = speed_mpn_redc_1;
  param.function2 = speed_mpz_mod;
  param.step_factor = 0.03;
  param.stop_factor = 1.1;
  param.function_fudge = 1.04;
  one (&powm_threshold, &param);
}
/* Tune MATRIX22_STRASSEN_THRESHOLD: where the Strassen-style 2x2 matrix
   multiply beats the naive one. */
void
tune_matrix22_mul (void)
{
  static struct param_t  param;

  param.name = "MATRIX22_STRASSEN_THRESHOLD";
  param.function = speed_mpn_matrix22_mul;
  param.min_size = 2;
  one (&matrix22_strassen_threshold, &param);
}
/* Tune HGCD_THRESHOLD for the half-gcd routine. */
void
tune_hgcd (void)
{
  static struct param_t  param;

  param.name = "HGCD_THRESHOLD";
  param.function = speed_mpn_hgcd;
  /* We seem to get strange results for small sizes */
  param.min_size = 30;
  one (&hgcd_threshold, &param);
}
#if 0
/* Tune GCD_ACCEL_THRESHOLD (currently disabled). */
void
tune_gcd_accel (void)
{
  static struct param_t  param;

  param.name = "GCD_ACCEL_THRESHOLD";
  param.function = speed_mpn_gcd;
  param.min_size = 1;
  one (&gcd_accel_threshold, &param);
}
#endif
/* Tune GCD_DC_THRESHOLD: where the subquadratic (hgcd based) gcd beats
   the basic one.  Starts from hgcd_threshold, so tune_hgcd must run
   first. */
void
tune_gcd_dc (void)
{
  static struct param_t  param;

  param.name = "GCD_DC_THRESHOLD";
  param.function = speed_mpn_gcd;
  param.min_size = hgcd_threshold;
  param.max_size = 3000;
  param.step_factor = 0.02;
  one (&gcd_dc_threshold, &param);
}
/* Tune GCDEXT_DC_THRESHOLD, the extended-gcd analogue of
   GCD_DC_THRESHOLD.  Starts from hgcd_threshold, so tune_hgcd must run
   first. */
void
tune_gcdext_dc (void)
{
  static struct param_t  param;

  param.name = "GCDEXT_DC_THRESHOLD";
  param.function = speed_mpn_gcdext;
  param.min_size = hgcd_threshold;
  param.max_size = 3000;
  param.step_factor = 0.02;
  one (&gcdext_dc_threshold, &param);
}
/* size_extra==1 reflects the fact that with high<divisor one division is
   always skipped. Forcing high<divisor while testing ensures consistency
   while stepping through sizes, ie. that size-1 divides will be done each
   time.
   min_size==2 and min_is_always are used so that if plain division is only
   better at size==1 then don't bother including that code just for that
   case, instead go with preinv always and get a size saving. */
/* Common param setup shared by the divrem_1 and mod_1 tuning below. */
#define DIV_1_PARAMS \
  param.check_size = 256; \
  param.min_size = 2; \
  param.min_is_always = 1; \
  param.data_high = DATA_HIGH_LT_R; \
  param.size_extra = 1; \
  param.stop_factor = 2.0;
/* Set to the divrem_1 speed routine the rest of the tuneup should use:
   the plain one, or the recompiled "_tune" variant once its thresholds
   have been measured. */
double (*tuned_speed_mpn_divrem_1) __GMP_PROTO ((struct speed_params *));

/* Tune DIVREM_1_NORM_THRESHOLD and DIVREM_1_UNNORM_THRESHOLD, the points
   where the pre-inverted inner loop beats plain udiv in mpn_divrem_1,
   for normalized and unnormalized divisors respectively. */
void
tune_divrem_1 (void)
{
  /* plain version by default */
  tuned_speed_mpn_divrem_1 = speed_mpn_divrem_1;

  /* No support for tuning native assembler code, do that by hand and put
     the results in the .asm file, there's no need for such thresholds to
     appear in gmp-mparam.h. */
  if (HAVE_NATIVE_mpn_divrem_1)
    return;

  if (GMP_NAIL_BITS != 0)
    {
      print_define_remark ("DIVREM_1_NORM_THRESHOLD", MP_SIZE_T_MAX,
                           "no preinv with nails");
      print_define_remark ("DIVREM_1_UNNORM_THRESHOLD", MP_SIZE_T_MAX,
                           "no preinv with nails");
      return;
    }

  if (UDIV_PREINV_ALWAYS)
    {
      print_define_remark ("DIVREM_1_NORM_THRESHOLD", 0L, "preinv always");
      print_define ("DIVREM_1_UNNORM_THRESHOLD", 0L);
      return;
    }

  tuned_speed_mpn_divrem_1 = speed_mpn_divrem_1_tune;

  /* Tune for the integer part of mpn_divrem_1. This will very possibly be
     a bit out for the fractional part, but that's too bad, the integer part
     is more important. */
  {
    static struct param_t  param;
    param.name = "DIVREM_1_NORM_THRESHOLD";
    DIV_1_PARAMS;
    s.r = randlimb_norm ();
    param.function = speed_mpn_divrem_1_tune;
    one (&divrem_1_norm_threshold, &param);
  }
  {
    static struct param_t  param;
    param.name = "DIVREM_1_UNNORM_THRESHOLD";
    DIV_1_PARAMS;
    s.r = randlimb_half ();
    param.function = speed_mpn_divrem_1_tune;
    one (&divrem_1_unnorm_threshold, &param);
  }
}
/* Set to the mod_1 speed routine the rest of the tuneup should use:
   the plain one, or the recompiled "_tune" variant once its thresholds
   have been measured. */
double (*tuned_speed_mpn_mod_1) __GMP_PROTO ((struct speed_params *));

/* Tune the mpn_mod_1 thresholds: preinv crossovers for normalized and
   unnormalized divisors, then the MOD_1_N multi-limb method crossovers. */
void
tune_mod_1 (void)
{
  /* plain version by default */
  tuned_speed_mpn_mod_1 = speed_mpn_mod_1;

  /* No support for tuning native assembler code, do that by hand and put
     the results in the .asm file, there's no need for such thresholds to
     appear in gmp-mparam.h. */
  if (HAVE_NATIVE_mpn_mod_1)
    return;

  if (GMP_NAIL_BITS != 0)
    {
      print_define_remark ("MOD_1_NORM_THRESHOLD", MP_SIZE_T_MAX,
                           "no preinv with nails");
      print_define_remark ("MOD_1_UNNORM_THRESHOLD", MP_SIZE_T_MAX,
                           "no preinv with nails");
      return;
    }

  if (UDIV_PREINV_ALWAYS)
    {
      print_define ("MOD_1_NORM_THRESHOLD", 0L);
      print_define ("MOD_1_UNNORM_THRESHOLD", 0L);
    }
  else
    {
      tuned_speed_mpn_mod_1 = speed_mpn_mod_1_tune;

      {
        static struct param_t  param;
        param.name = "MOD_1_NORM_THRESHOLD";
        DIV_1_PARAMS;
        s.r = randlimb_norm ();
        param.function = speed_mpn_mod_1_tune;
        one (&mod_1_norm_threshold, &param);
      }
      {
        static struct param_t  param;
        param.name = "MOD_1_UNNORM_THRESHOLD";
        DIV_1_PARAMS;
        s.r = randlimb_half ();
        param.function = speed_mpn_mod_1_tune;
        one (&mod_1_unnorm_threshold, &param);
      }
    }

  {
    static struct param_t  param;

    s.r = GMP_NUMB_MASK / 5;    /* fixed mid-range divisor */
    param.function = speed_mpn_mod_1_tune;

    param.min_size = 1;
    param.name = "MOD_1_1_THRESHOLD";
    one (&mod_1_1_threshold, &param);

    param.name = "MOD_1_2_THRESHOLD";
    param.min_size = mod_1_1_threshold + 1;
    one (&mod_1_2_threshold, &param);

#if 0
    param.name = "MOD_1_3_THRESHOLD";
    param.min_size = mod_1_2_threshold + 1;
    one (&mod_1_3_threshold, &param);
#endif

    param.name = "MOD_1_4_THRESHOLD";
    param.min_size = mod_1_2_threshold + 1;
    one (&mod_1_4_threshold, &param);
  }
}
/* A non-zero DIVREM_1_UNNORM_THRESHOLD (or DIVREM_1_NORM_THRESHOLD) would
   imply that udiv_qrnnd_preinv is worth using, but it seems most
   straightforward to compare mpn_preinv_divrem_1 and mpn_divrem_1_div
   directly. */
void
tune_preinv_divrem_1 (void)
{
  static struct param_t  param;
  speed_function_t  divrem_1;
  const char  *divrem_1_name;
  double  t1, t2;

  if (GMP_NAIL_BITS != 0)
    {
      print_define_remark ("USE_PREINV_DIVREM_1", 0, "no preinv with nails");
      return;
    }

  /* Any native version of mpn_preinv_divrem_1 is assumed to exist because
     it's faster than mpn_divrem_1. */
  if (HAVE_NATIVE_mpn_preinv_divrem_1)
    {
      print_define_remark ("USE_PREINV_DIVREM_1", 1, "native");
      return;
    }

  /* If udiv_qrnnd_preinv is the only division method then of course
     mpn_preinv_divrem_1 should be used. */
  if (UDIV_PREINV_ALWAYS)
    {
      print_define_remark ("USE_PREINV_DIVREM_1", 1, "preinv always");
      return;
    }

  /* If we've got an assembler version of mpn_divrem_1, then compare against
     that, not the mpn_divrem_1_div generic C. */
  if (HAVE_NATIVE_mpn_divrem_1)
    {
      divrem_1 = speed_mpn_divrem_1;
      divrem_1_name = "mpn_divrem_1";
    }
  else
    {
      divrem_1 = speed_mpn_divrem_1_div;
      divrem_1_name = "mpn_divrem_1_div";
    }

  param.data_high = DATA_HIGH_LT_R; /* allow skip one division */
  s.size = 200;                     /* generous but not too big */
  /* Divisor, nonzero. Unnormalized so as to exercise the shift!=0 case,
     since in general that's probably most common, though in fact for a
     64-bit limb mp_bases[10].big_base is normalized. */
  s.r = urandom() & (GMP_NUMB_MASK >> 4);
  if (s.r == 0) s.r = 123;

  t1 = tuneup_measure (speed_mpn_preinv_divrem_1, &param, &s);
  t2 = tuneup_measure (divrem_1, &param, &s);
  if (t1 == -1.0 || t2 == -1.0)
    {
      printf ("Oops, can't measure mpn_preinv_divrem_1 and %s at %ld\n",
              divrem_1_name, (long) s.size);
      abort ();
    }
  if (option_trace >= 1)
    printf ("size=%ld, mpn_preinv_divrem_1 %.9f, %s %.9f\n",
            (long) s.size, t1, divrem_1_name, t2);

  print_define_remark ("USE_PREINV_DIVREM_1", (mp_size_t) (t1 < t2), NULL);
}
/* A non-zero MOD_1_UNNORM_THRESHOLD (or MOD_1_NORM_THRESHOLD) would imply
   that udiv_qrnnd_preinv is worth using, but it seems most straightforward
   to compare mpn_preinv_mod_1 and mpn_mod_1_div directly. */
void
tune_preinv_mod_1 (void)
{
  static struct param_t  param;
  speed_function_t  mod_1;
  const char  *mod_1_name;
  double  t1, t2;

  /* Any native version of mpn_preinv_mod_1 is assumed to exist because it's
     faster than mpn_mod_1. */
  if (HAVE_NATIVE_mpn_preinv_mod_1)
    {
      print_define_remark ("USE_PREINV_MOD_1", 1, "native");
      return;
    }

  if (GMP_NAIL_BITS != 0)
    {
      print_define_remark ("USE_PREINV_MOD_1", 0, "no preinv with nails");
      return;
    }

  /* If udiv_qrnnd_preinv is the only division method then of course
     mpn_preinv_mod_1 should be used. */
  if (UDIV_PREINV_ALWAYS)
    {
      print_define_remark ("USE_PREINV_MOD_1", 1, "preinv always");
      return;
    }

  /* If we've got an assembler version of mpn_mod_1, then compare against
     that, not the mpn_mod_1_div generic C. */
  if (HAVE_NATIVE_mpn_mod_1)
    {
      mod_1 = speed_mpn_mod_1;
      mod_1_name = "mpn_mod_1";
    }
  else
    {
      mod_1 = speed_mpn_mod_1_div;
      mod_1_name = "mpn_mod_1_div";
    }

  param.data_high = DATA_HIGH_LT_R; /* let mpn_mod_1 skip one division */
  s.size = 200;                     /* generous but not too big */
  s.r = randlimb_norm();            /* divisor */

  t1 = tuneup_measure (speed_mpn_preinv_mod_1, &param, &s);
  t2 = tuneup_measure (mod_1, &param, &s);
  if (t1 == -1.0 || t2 == -1.0)
    {
      printf ("Oops, can't measure mpn_preinv_mod_1 and %s at %ld\n",
              mod_1_name, (long) s.size);
      abort ();
    }
  if (option_trace >= 1)
    printf ("size=%ld, mpn_preinv_mod_1 %.9f, %s %.9f\n",
            (long) s.size, t1, mod_1_name, t2);

  print_define_remark ("USE_PREINV_MOD_1", (mp_size_t) (t1 < t2), NULL);
}
/* Tune DIVREM_2_THRESHOLD: where the pre-inverted mpn_divrem_2 inner
   loop beats plain udiv. */
void
tune_divrem_2 (void)
{
  static struct param_t  param;

  /* No support for tuning native assembler code, do that by hand and put
     the results in the .asm file, and there's no need for such thresholds
     to appear in gmp-mparam.h. */
  if (HAVE_NATIVE_mpn_divrem_2)
    return;

  if (GMP_NAIL_BITS != 0)
    {
      print_define_remark ("DIVREM_2_THRESHOLD", MP_SIZE_T_MAX,
                           "no preinv with nails");
      return;
    }

  if (UDIV_PREINV_ALWAYS)
    {
      print_define_remark ("DIVREM_2_THRESHOLD", 0L, "preinv always");
      return;
    }

  /* Tune for the integer part of mpn_divrem_2. This will very possibly be
     a bit out for the fractional part, but that's too bad, the integer part
     is more important.
     min_size must be >=2 since nsize>=2 is required, but is set to 4 to save
     code space if plain division is better only at size==2 or size==3. */
  param.name = "DIVREM_2_THRESHOLD";
  param.check_size = 256;
  param.min_size = 4;
  param.min_is_always = 1;
  param.size_extra = 2; /* does qsize==nsize-2 divisions */
  param.stop_factor = 2.0;

  s.r = randlimb_norm ();
  param.function = speed_mpn_divrem_2;
  one (&divrem_2_threshold, &param);
}
/* mpn_divexact_1 is vaguely expected to be used on smallish divisors, so
   tune for that. Its speed can differ on odd or even divisor, so take an
   average threshold for the two.
   mpn_divrem_1 can vary with high<divisor or not, whereas mpn_divexact_1
   might not vary that way, but don't test this since high<divisor isn't
   expected to occur often with small divisors. */
void
tune_divexact_1 (void)
{
  static struct param_t  param;
  mp_size_t  thresh[2], average;
  int  low, i;

  /* Any native mpn_divexact_1 is assumed to incorporate all the speed of a
     full mpn_divrem_1. */
  if (HAVE_NATIVE_mpn_divexact_1)
    {
      print_define_remark ("DIVEXACT_1_THRESHOLD", 0, "always (native)");
      return;
    }

  /* tune_divrem_1 must have run first to set this pointer */
  ASSERT_ALWAYS (tuned_speed_mpn_divrem_1 != NULL);

  param.name = "DIVEXACT_1_THRESHOLD";
  param.data_high = DATA_HIGH_GE_R;
  param.check_size = 256;
  param.min_size = 2;
  param.stop_factor = 1.5;
  param.function = tuned_speed_mpn_divrem_1;
  param.function2 = speed_mpn_divexact_1;
  param.noprint = 1;
  print_define_start (param.name);

  /* low==0 measures an odd divisor, low==1 an even one (low 3 bits 0) */
  for (low = 0; low <= 1; low++)
    {
      s.r = randlimb_half();
      if (low == 0)
        s.r |= 1;
      else
        s.r &= ~CNST_LIMB(7);

      one (&thresh[low], &param);
      if (option_trace)
        printf ("low=%d thresh %ld\n", low, (long) thresh[low]);

      if (thresh[low] == MP_SIZE_T_MAX)
        {
          average = MP_SIZE_T_MAX;
          goto divexact_1_done;
        }
    }

  if (option_trace)
    {
      printf ("average of:");
      for (i = 0; i < numberof(thresh); i++)
        printf (" %ld", (long) thresh[i]);
      printf ("\n");
    }

  average = 0;
  for (i = 0; i < numberof(thresh); i++)
    average += thresh[i];
  average /= numberof(thresh);

  /* If divexact turns out to be better as early as 3 limbs, then use it
     always, so as to reduce code size and conditional jumps. */
  if (average <= 3)
    average = 0;

 divexact_1_done:
  print_define_end (param.name, average);
}
/* The generic mpn_modexact_1_odd skips a divide step if high<divisor, the
   same as mpn_mod_1, but this might not be true of an assembler
   implementation. The threshold used is an average based on data where a
   divide can be skipped and where it can't.
   If modexact turns out to be better as early as 3 limbs, then use it
   always, so as to reduce code size and conditional jumps. */
void
tune_modexact_1_odd (void)
{
  static struct param_t  param;
  mp_size_t  thresh_lt, thresh_ge, average;

  /* Any native mpn_modexact_1_odd is assumed to incorporate all the speed
     of a full mpn_mod_1. */
  if (HAVE_NATIVE_mpn_modexact_1_odd)
    {
      print_define_remark ("MODEXACT_1_ODD_THRESHOLD", 0, "always (native)");
      return;
    }

  /* tune_mod_1 must have run first to set this pointer */
  ASSERT_ALWAYS (tuned_speed_mpn_mod_1 != NULL);

  param.name = "MODEXACT_1_ODD_THRESHOLD";
  param.check_size = 256;
  param.min_size = 2;
  param.stop_factor = 1.5;
  param.function = tuned_speed_mpn_mod_1;
  param.function2 = speed_mpn_modexact_1c_odd;
  param.noprint = 1;
  s.r = randlimb_half () | 1;   /* odd divisor required */
  print_define_start (param.name);

  param.data_high = DATA_HIGH_LT_R;
  one (&thresh_lt, &param);
  if (option_trace)
    printf ("lt thresh %ld\n", (long) thresh_lt);

  average = thresh_lt;
  if (thresh_lt != MP_SIZE_T_MAX)
    {
      param.data_high = DATA_HIGH_GE_R;
      one (&thresh_ge, &param);
      if (option_trace)
        printf ("ge thresh %ld\n", (long) thresh_ge);

      if (thresh_ge != MP_SIZE_T_MAX)
        {
          average = (thresh_ge + thresh_lt) / 2;
          if (thresh_ge <= 3)
            average = 0;
        }
    }

  print_define_end (param.name, average);
}
/* Choose JACOBI_BASE_METHOD by timing the three candidate single-limb
   Jacobi symbol routines at a representative operand size and picking
   the fastest. */
void
tune_jacobi_base (void)
{
  static struct param_t  param;
  double  t1, t2, t3;
  int  method;

  s.size = BITS_PER_MP_LIMB * 3 / 4;

  t1 = tuneup_measure (speed_mpn_jacobi_base_1, &param, &s);
  if (option_trace >= 1)
    printf ("size=%ld, mpn_jacobi_base_1 %.9f\n", (long) s.size, t1);

  t2 = tuneup_measure (speed_mpn_jacobi_base_2, &param, &s);
  if (option_trace >= 1)
    printf ("size=%ld, mpn_jacobi_base_2 %.9f\n", (long) s.size, t2);

  t3 = tuneup_measure (speed_mpn_jacobi_base_3, &param, &s);
  if (option_trace >= 1)
    printf ("size=%ld, mpn_jacobi_base_3 %.9f\n", (long) s.size, t3);

  if (t1 == -1.0 || t2 == -1.0 || t3 == -1.0)
    {
      printf ("Oops, can't measure all mpn_jacobi_base methods at %ld\n",
              (long) s.size);
      abort ();
    }

  if (t1 < t2 && t1 < t3)
    method = 1;
  else if (t2 < t3)
    method = 2;
  else
    method = 3;

  print_define ("JACOBI_BASE_METHOD", method);
}
/* Tune the mpn_get_str thresholds: basecase -> divide-and-conquer, then
   divide-and-conquer with precomputed powers. */
void
tune_get_str (void)
{
  /* Tune for decimal, it being most common. Some rough testing suggests
     other bases are different, but not by very much. */
  s.r = 10;
  {
    static struct param_t  param;
    GET_STR_PRECOMPUTE_THRESHOLD = 0;
    param.name = "GET_STR_DC_THRESHOLD";
    param.function = speed_mpn_get_str;
    param.min_size = 4;
    param.max_size = GET_STR_THRESHOLD_LIMIT;
    one (&get_str_dc_threshold, &param);
  }
  {
    static struct param_t  param;
    param.name = "GET_STR_PRECOMPUTE_THRESHOLD";
    param.function = speed_mpn_get_str;
    param.min_size = GET_STR_DC_THRESHOLD;
    param.max_size = GET_STR_THRESHOLD_LIMIT;
    one (&get_str_precompute_threshold, &param);
  }
}
/* Measure mpn_pre_set_str: convert an s->size byte string of base digits to
   limbs, with the powers table and scratch space built OUTSIDE the timed
   loop, so only the conversion itself is measured.  s->r selects the base
   (0 means decimal).  Returns seconds per call.  */
double
speed_mpn_pre_set_str (struct speed_params *s)
{
  unsigned char *str;
  mp_ptr     wp;
  mp_size_t  wn;
  unsigned   i;
  int        base;
  double     t;
  mp_ptr powtab_mem, tp;
  powers_t powtab[GMP_LIMB_BITS];
  mp_size_t un;
  int chars_per_limb;
  TMP_DECL;

  SPEED_RESTRICT_COND (s->size >= 1);

  /* s->r == 0 means "default", i.e. decimal */
  base = s->r == 0 ? 10 : s->r;
  SPEED_RESTRICT_COND (base >= 2 && base <= 256);

  TMP_MARK;

  /* build a pseudo-random digit string of the requested length */
  str = TMP_ALLOC (s->size);
  for (i = 0; i < s->size; i++)
    str[i] = s->xp[i] % base;

  /* limb count needed for the converted result, plus a little slop */
  wn = ((mp_size_t) (s->size / __mp_bases[base].chars_per_bit_exactly))
    / BITS_PER_MP_LIMB + 2;
  SPEED_TMP_ALLOC_LIMBS (wp, wn, s->align_wp);

  /* use this during development to check wn is big enough */
  /*
  ASSERT_ALWAYS (mpn_set_str (wp, str, s->size, base) <= wn);
  */

  speed_operand_src (s, (mp_ptr) str, s->size/BYTES_PER_MP_LIMB);
  speed_operand_dst (s, wp, wn);
  speed_cache_fill (s);

  /* precompute the powers-of-base table and scratch area once; these are
     inputs to mpn_pre_set_str and deliberately excluded from the timing */
  chars_per_limb = __mp_bases[base].chars_per_limb;
  un = s->size / chars_per_limb + 1;
  powtab_mem = TMP_BALLOC_LIMBS (mpn_dc_set_str_powtab_alloc (un));
  mpn_set_str_compute_powtab (powtab, powtab_mem, un, base);
  tp = TMP_BALLOC_LIMBS (mpn_dc_set_str_itch (un));

  speed_starttime ();
  i = s->reps;
  do
    {
      mpn_pre_set_str (wp, str, s->size, powtab, tp);
    }
  while (--i != 0);
  t = speed_endtime ();

  TMP_FREE;
  return t;
}
void
tune_set_str (void)
{
static struct param_t param;
s.r = 10; /* decimal */
{
static struct param_t param;
SET_STR_PRECOMPUTE_THRESHOLD = 0;
param.step_factor = 0.01;
param.name = "SET_STR_DC_THRESHOLD";
param.function = speed_mpn_pre_set_str;
param.min_size = 100;
param.max_size = 50000;
one (&set_str_dc_threshold, ¶m);
}
{
static struct param_t param;
param.step_factor = 0.02;
param.name = "SET_STR_PRECOMPUTE_THRESHOLD";
param.function = speed_mpn_set_str;
param.min_size = SET_STR_DC_THRESHOLD;
param.max_size = 100000;
one (&set_str_precompute_threshold, ¶m);
}
}
/* Generate MUL_FFT_TABLE / MUL_FFT_THRESHOLD / MUL_FFT_MODF_THRESHOLD.
   Skipped entirely when FFT tuning is disabled (-f0).
   Fix: "fft (¶m)" mojibake restored to "fft (&param)".  */
void
tune_fft_mul (void)
{
  static struct fft_param_t  param;

  if (option_fft_max_size == 0)
    return;

  param.table_name          = "MUL_FFT_TABLE";
  param.threshold_name      = "MUL_FFT_THRESHOLD";
  param.p_threshold         = &mul_fft_threshold;
  param.modf_threshold_name = "MUL_FFT_MODF_THRESHOLD";
  param.p_modf_threshold    = &mul_fft_modf_threshold;
  param.first_size          = MUL_TOOM3_THRESHOLD / 2;
  param.max_size            = option_fft_max_size;
  param.function            = speed_mpn_mul_fft;
  param.mul_function        = speed_mpn_mul_n;
  param.sqr = 0;
  fft (&param);
}
/* Generate SQR_FFT_TABLE / SQR_FFT_THRESHOLD / SQR_FFT_MODF_THRESHOLD.
   Fix: "fft (¶m)" mojibake restored to "fft (&param)".  */
void
tune_fft_sqr (void)
{
  static struct fft_param_t  param;

  if (option_fft_max_size == 0)
    return;

  param.table_name          = "SQR_FFT_TABLE";
  param.threshold_name      = "SQR_FFT_THRESHOLD";
  param.p_threshold         = &sqr_fft_threshold;
  param.modf_threshold_name = "SQR_FFT_MODF_THRESHOLD";
  param.p_modf_threshold    = &sqr_fft_modf_threshold;
  param.first_size          = SQR_TOOM3_THRESHOLD / 2;
  param.max_size            = option_fft_max_size;
  param.function            = speed_mpn_mul_fft_sqr;
  param.mul_function        = speed_mpn_sqr_n;
  /* NOTE(review): sqr is 0 here even though this tunes squaring; the
     dedicated *_sqr speed functions above appear to carry that choice
     instead -- confirm fft() ignores this flag before changing it.  */
  param.sqr = 0;
  fft (&param);
}
/* Run every tuning measurement in order.  Generated gmp-mparam.h style
   output goes to stdout; configuration and progress notes go to stderr.  */
void
all (void)
{
  time_t  start_time, end_time;
  TMP_DECL;

  TMP_MARK;
  /* shared random operand blocks used by the individual speed routines */
  SPEED_TMP_ALLOC_LIMBS (s.xp_block, SPEED_BLOCK_SIZE, 0);
  SPEED_TMP_ALLOC_LIMBS (s.yp_block, SPEED_BLOCK_SIZE, 0);

  mpn_random (s.xp_block, SPEED_BLOCK_SIZE);
  mpn_random (s.yp_block, SPEED_BLOCK_SIZE);

  fprintf (stderr, "Parameters for %s\n", GMP_MPARAM_H_SUGGEST);

  speed_time_init ();
  fprintf (stderr, "Using: %s\n", speed_time_string);

  /* describe the timing setup so the generated numbers can be judged */
  fprintf (stderr, "speed_precision %d", speed_precision);
  if (speed_unittime == 1.0)
    fprintf (stderr, ", speed_unittime 1 cycle");
  else
    fprintf (stderr, ", speed_unittime %.2e secs", speed_unittime);
  if (speed_cycletime == 1.0 || speed_cycletime == 0.0)
    fprintf (stderr, ", CPU freq unknown\n");
  else
    fprintf (stderr, ", CPU freq %.2f MHz\n", 1e-6/speed_cycletime);

  fprintf (stderr, "DEFAULT_MAX_SIZE %d, fft_max_size %ld\n",
           DEFAULT_MAX_SIZE, (long) option_fft_max_size);
  fprintf (stderr, "\n");

  time (&start_time);
  {
    /* header comment of the generated file: date and compiler used */
    struct tm  *tp;
    tp = localtime (&start_time);
    printf ("/* Generated by tuneup.c, %d-%02d-%02d, ",
            tp->tm_year+1900, tp->tm_mon+1, tp->tm_mday);

#ifdef __GNUC__
    /* gcc sub-minor version doesn't seem to come through as a define */
    printf ("gcc %d.%d */\n", __GNUC__, __GNUC_MINOR__);
#define PRINTED_COMPILER
#endif
#if defined (__SUNPRO_C)
    printf ("Sun C %d.%d */\n", __SUNPRO_C / 0x100, __SUNPRO_C % 0x100);
#define PRINTED_COMPILER
#endif
#if ! defined (__GNUC__) && defined (__sgi) && defined (_COMPILER_VERSION)
    /* gcc defines __sgi and _COMPILER_VERSION on irix 6, avoid that */
    printf ("MIPSpro C %d.%d.%d */\n",
            _COMPILER_VERSION / 100,
            _COMPILER_VERSION / 10 % 10,
            _COMPILER_VERSION % 10);
#define PRINTED_COMPILER
#endif
#if defined (__DECC) && defined (__DECC_VER)
    printf ("DEC C %d */\n", __DECC_VER);
#define PRINTED_COMPILER
#endif
#if ! defined (PRINTED_COMPILER)
    printf ("system compiler */\n");
#endif
  }
  printf ("\n");

  /* multiplication and squaring */
  tune_mul ();
  printf("\n");

  tune_sqr ();
  printf("\n");

  tune_mullow ();
  printf("\n");

  /* division */
  tune_sb_preinv ();
  tune_dc ();
  tune_powm ();
  printf("\n");

  /* gcd family */
  tune_matrix22_mul ();
  tune_hgcd ();
  tune_gcd_dc ();
  tune_gcdext_dc ();
#if 0
  tune_gcd_accel ();
#endif
  tune_jacobi_base ();
  printf("\n");

  /* single-limb division variants */
  tune_divrem_1 ();
  tune_mod_1 ();
  tune_preinv_divrem_1 ();
  tune_preinv_mod_1 ();
  tune_divrem_2 ();
  tune_divexact_1 ();
  tune_modexact_1_odd ();
  printf("\n");

  /* radix conversion */
  tune_get_str ();
  tune_set_str ();
  printf("\n");

  /* FFT tables last, they take the longest */
  tune_fft_mul ();
  printf("\n");

  tune_fft_sqr ();
  printf ("\n");

  time (&end_time);
  printf ("/* Tuneup completed successfully, took %ld seconds */\n",
          end_time - start_time);

  TMP_FREE;
}
/* Parse command line options, then run the whole tuning sequence.
   Options: -f t (fft trace), -f N (fft size cap), -o OPT (speed option),
   -p N (measuring precision), -t (increase tracing).  */
int
main (int argc, char *argv[])
{
  int  opt;

  /* Unbuffered so if output is redirected to a file it isn't lost if the
     program is killed part way through. */
  setbuf (stdout, NULL);
  setbuf (stderr, NULL);

  for (;;)
    {
      opt = getopt (argc, argv, "f:o:p:t");
      if (opt == EOF)
        break;

      switch (opt)
        {
        case 'f':
          if (optarg[0] == 't')
            option_fft_trace = 2;
          else
            option_fft_max_size = atol (optarg);
          break;
        case 'o':
          speed_option_set (optarg);
          break;
        case 'p':
          speed_precision = atoi (optarg);
          break;
        case 't':
          option_trace++;
          break;
        case '?':
          exit (1);
        }
    }

  all ();
  exit (0);
}
| gpl-2.0 |
sensysnetworks/linux-2.6.28-at91 | arch/x86/kernel/microcode_amd.c | 13 | 11183 | /*
* AMD CPU Microcode Update Driver for Linux
* Copyright (C) 2008 Advanced Micro Devices Inc.
*
* Author: Peter Oruba <peter.oruba@amd.com>
*
* Based on work by:
* Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
*
* This driver allows to upgrade microcode on AMD
* family 0x10 and 0x11 processors.
*
 * Licensed under the terms of the GNU General Public
* License version 2. See file COPYING for details.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/microcode.h>
MODULE_DESCRIPTION("AMD Microcode Update Driver");
MODULE_AUTHOR("Peter Oruba");
MODULE_LICENSE("GPL v2");
#define UCODE_MAGIC 0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
#define UCODE_UCODE_TYPE 0x00000001
struct equiv_cpu_entry {
unsigned int installed_cpu;
unsigned int fixed_errata_mask;
unsigned int fixed_errata_compare;
unsigned int equiv_cpu;
};
struct microcode_header_amd {
unsigned int data_code;
unsigned int patch_id;
unsigned char mc_patch_data_id[2];
unsigned char mc_patch_data_len;
unsigned char init_flag;
unsigned int mc_patch_data_checksum;
unsigned int nb_dev_id;
unsigned int sb_dev_id;
unsigned char processor_rev_id[2];
unsigned char nb_rev_id;
unsigned char sb_rev_id;
unsigned char bios_api_rev;
unsigned char reserved1[3];
unsigned int match_reg[8];
};
struct microcode_amd {
struct microcode_header_amd hdr;
unsigned int mpb[0];
};
#define UCODE_MAX_SIZE (2048)
#define DEFAULT_UCODE_DATASIZE (896)
#define MC_HEADER_SIZE (sizeof(struct microcode_header_amd))
#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
#define DWSIZE (sizeof(u32))
/* For now we support a fixed ucode total size only */
#define get_totalsize(mc) \
((((struct microcode_amd *)mc)->hdr.mc_patch_data_len * 28) \
+ MC_HEADER_SIZE)
/* serialize access to the physical write */
static DEFINE_SPINLOCK(microcode_update_lock);
static struct equiv_cpu_entry *equiv_cpu_table;
/* Fill *csig with this CPU's current microcode patch level (MSR 0x8B).
   Returns 0 on success, -1 if the CPU is not an AMD family >= 0x10 part. */
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	memset(csig, 0, sizeof(*csig));

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		printk(KERN_ERR "microcode: CPU%d not a capable AMD processor\n",
		       cpu);
		return -1;
	}

	/* read the current patch level from MSR 0x0000008B */
	asm volatile("movl %1, %%ecx; rdmsr"
		     : "=a" (csig->rev)
		     : "i" (0x0000008B) : "ecx");

	printk(KERN_INFO "microcode: collect_cpu_info_amd : patch_id=0x%x\n",
	       csig->rev);

	return 0;
}
/* Decide whether patch `mc` applies to `cpu` and is newer than revision
   `rev`.  Checks, in order: the CPU id is present in the container's
   equivalence table, the patch's processor rev id bytes match the
   equivalent id, any north/southbridge constraints match the actual PCI
   devices, and finally that the patch id is strictly newer than `rev`.
   Returns 1 if the patch should be used, 0 otherwise. */
static int get_matching_microcode(int cpu, void *mc, int rev)
{
	struct microcode_header_amd *mc_header = mc;
	struct pci_dev *nb_pci_dev, *sb_pci_dev;
	unsigned int current_cpu_id;
	unsigned int equiv_cpu_id = 0x00;
	unsigned int i = 0;

	/* install_equiv_cpu_table() must have run first */
	BUG_ON(equiv_cpu_table == NULL);
	current_cpu_id = cpuid_eax(0x00000001);

	/* table is terminated by a zero installed_cpu entry */
	while (equiv_cpu_table[i].installed_cpu != 0) {
		if (current_cpu_id == equiv_cpu_table[i].installed_cpu) {
			equiv_cpu_id = equiv_cpu_table[i].equiv_cpu;
			break;
		}
		i++;
	}

	if (!equiv_cpu_id) {
		printk(KERN_ERR "microcode: CPU%d cpu_id "
		       "not found in equivalent cpu table \n", cpu);
		return 0;
	}

	/* low byte of the equivalent id must match the patch's extended id */
	if ((mc_header->processor_rev_id[0]) != (equiv_cpu_id & 0xff)) {
		printk(KERN_ERR
			"microcode: CPU%d patch does not match "
			"(patch is %x, cpu extended is %x) \n",
			cpu, mc_header->processor_rev_id[0],
			(equiv_cpu_id & 0xff));
		return 0;
	}

	/* byte at bits 16..23 must match the patch's base id */
	if ((mc_header->processor_rev_id[1]) != ((equiv_cpu_id >> 16) & 0xff)) {
		printk(KERN_ERR "microcode: CPU%d patch does not match "
			"(patch is %x, cpu base id is %x) \n",
			cpu, mc_header->processor_rev_id[1],
			((equiv_cpu_id >> 16) & 0xff));
		return 0;
	}

	/* ucode may be northbridge specific */
	if (mc_header->nb_dev_id) {
		nb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD,
					    (mc_header->nb_dev_id & 0xff),
					    NULL);
		if ((!nb_pci_dev) ||
		    (mc_header->nb_rev_id != nb_pci_dev->revision)) {
			printk(KERN_ERR "microcode: CPU%d NB mismatch \n", cpu);
			pci_dev_put(nb_pci_dev);
			return 0;
		}
		pci_dev_put(nb_pci_dev);
	}

	/* ucode may be southbridge specific */
	if (mc_header->sb_dev_id) {
		sb_pci_dev = pci_get_device(PCI_VENDOR_ID_AMD,
					    (mc_header->sb_dev_id & 0xff),
					    NULL);
		if ((!sb_pci_dev) ||
		    (mc_header->sb_rev_id != sb_pci_dev->revision)) {
			printk(KERN_ERR "microcode: CPU%d SB mismatch \n", cpu);
			pci_dev_put(sb_pci_dev);
			return 0;
		}
		pci_dev_put(sb_pci_dev);
	}

	/* only accept patches strictly newer than what is already applied */
	if (mc_header->patch_id <= rev)
		return 0;

	return 1;
}
static void apply_microcode_amd(int cpu)
{
unsigned long flags;
unsigned int eax, edx;
unsigned int rev;
int cpu_num = raw_smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
struct microcode_amd *mc_amd = uci->mc;
unsigned long addr;
/* We should bind the task to the CPU */
BUG_ON(cpu_num != cpu);
if (mc_amd == NULL)
return;
spin_lock_irqsave(µcode_update_lock, flags);
addr = (unsigned long)&mc_amd->hdr.data_code;
edx = (unsigned int)(((unsigned long)upper_32_bits(addr)));
eax = (unsigned int)(((unsigned long)lower_32_bits(addr)));
asm volatile("movl %0, %%ecx; wrmsr" :
: "i" (0xc0010020), "a" (eax), "d" (edx) : "ecx");
/* get patch id after patching */
asm volatile("movl %1, %%ecx; rdmsr"
: "=a" (rev)
: "i" (0x0000008B) : "ecx");
spin_unlock_irqrestore(µcode_update_lock, flags);
/* check current patch id and patch's id for match */
if (rev != mc_amd->hdr.patch_id) {
printk(KERN_ERR "microcode: CPU%d update from revision "
"0x%x to 0x%x failed\n", cpu_num,
mc_amd->hdr.patch_id, rev);
return;
}
printk(KERN_INFO "microcode: CPU%d updated from revision "
"0x%x to 0x%x \n",
cpu_num, uci->cpu_sig.rev, mc_amd->hdr.patch_id);
uci->cpu_sig.rev = rev;
}
/* Parse the next microcode section at `buf` (with `size` bytes remaining).
   On success returns a freshly vmalloc'ed UCODE_MAX_SIZE buffer holding the
   patch (caller owns it and must vfree) and stores the number of container
   bytes consumed in *mc_size.  Returns NULL on a malformed or oversized
   section. */
static void * get_next_ucode(u8 *buf, unsigned int size,
			int (*get_ucode_data)(void *, const void *, size_t),
			unsigned int *mc_size)
{
	unsigned int total_size;
#define UCODE_CONTAINER_SECTION_HDR	8
	u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
	void *mc;

	if (get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR))
		return NULL;

	if (section_hdr[0] != UCODE_UCODE_TYPE) {
		printk(KERN_ERR "microcode: error! "
		       "Wrong microcode payload type field\n");
		return NULL;
	}

	/* section size is a 16-bit little-endian value at bytes 4..5 */
	total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));

	printk(KERN_INFO "microcode: size %u, total_size %u\n",
	       size, total_size);

	/* reject sections that overrun the remaining data or our fixed cap */
	if (total_size > size || total_size > UCODE_MAX_SIZE) {
		printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
		return NULL;
	}

	mc = vmalloc(UCODE_MAX_SIZE);
	if (mc) {
		memset(mc, 0, UCODE_MAX_SIZE);
		if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size)) {
			vfree(mc);
			mc = NULL;
		} else
			*mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
	}
#undef UCODE_CONTAINER_SECTION_HDR
	return mc;
}
/* Read the equivalence-table section at the start of the container and
   stash it in the global equiv_cpu_table (vmalloc'ed; freed by
   free_equiv_cpu_table()).  Returns the number of container bytes consumed
   (header + table), or 0 on error.
   Fix: container_hdr was declared "u8 *container_hdr[12]" -- an array of
   twelve POINTERS -- for a twelve-BYTE header, and its address was passed
   with a spurious '&'.  Declared it as a byte array and passed it directly;
   buf_pos aliasing is unchanged.  */
static int install_equiv_cpu_table(u8 *buf,
		int (*get_ucode_data)(void *, const void *, size_t))
{
#define UCODE_CONTAINER_HEADER_SIZE	12
	u8 container_hdr[UCODE_CONTAINER_HEADER_SIZE];
	unsigned int *buf_pos = (unsigned int *)container_hdr;
	unsigned long size;

	if (get_ucode_data(container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE))
		return 0;

	/* header words: [0]=magic, [1]=section type, [2]=table size */
	size = buf_pos[2];

	if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		printk(KERN_ERR "microcode: error! "
		       "Wrong microcode equivalnet cpu table\n");
		return 0;
	}

	equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
	if (!equiv_cpu_table) {
		printk(KERN_ERR "microcode: error, can't allocate memory for equiv CPU table\n");
		return 0;
	}

	buf += UCODE_CONTAINER_HEADER_SIZE;
	if (get_ucode_data(equiv_cpu_table, buf, size)) {
		vfree(equiv_cpu_table);
		return 0;
	}

	return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
#undef UCODE_CONTAINER_HEADER_SIZE
}
/* Release the cached equivalence table, if any, and reset the pointer so a
   stale table is never reused by a later load. */
static void free_equiv_cpu_table(void)
{
	if (!equiv_cpu_table)
		return;

	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
/* Walk a whole container image: install the equivalence table, then scan
   every ucode section keeping the newest patch that matches this CPU.
   On a clean full parse the winning patch is stored in uci->mc (replacing
   and freeing any previous one).  Returns the number of unparsed bytes
   (0 = success), or -EINVAL if the equivalence table is bad. */
static int generic_load_microcode(int cpu, void *data, size_t size,
		int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover;
	unsigned long offset;

	offset = install_equiv_cpu_table(ucode_ptr, get_ucode_data);
	if (!offset) {
		printk(KERN_ERR "microcode: installing equivalent cpu table failed\n");
		return -EINVAL;
	}

	ucode_ptr += offset;
	leftover = size - offset;

	while (leftover) {
		unsigned int uninitialized_var(mc_size);
		struct microcode_header_amd *mc_header;

		mc = get_next_ucode(ucode_ptr, leftover, get_ucode_data, &mc_size);
		if (!mc)
			break;

		mc_header = (struct microcode_header_amd *)mc;
		/* keep only the newest matching patch seen so far */
		if (get_matching_microcode(cpu, mc, new_rev)) {
			if (new_mc)
				vfree(new_mc);
			new_rev = mc_header->patch_id;
			new_mc  = mc;
		} else
			vfree(mc);

		ucode_ptr += mc_size;
		leftover  -= mc_size;
	}

	if (new_mc) {
		/* only commit the patch if the whole image parsed cleanly */
		if (!leftover) {
			if (uci->mc)
				vfree(uci->mc);
			uci->mc = new_mc;
			pr_debug("microcode: CPU%d found a matching microcode update with"
				" version 0x%x (current=0x%x)\n",
				cpu, new_rev, uci->cpu_sig.rev);
		} else
			vfree(new_mc);
	}

	free_equiv_cpu_table();

	return (int)leftover;
}
/* Copy callback used for firmware-loader sourced images: the data is
   already in kernel memory, so this is a plain memcpy that always
   reports success. */
static int get_ucode_fw(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}
/* Fetch the microcode container via the firmware loader and feed it to
   generic_load_microcode().  Must be called with the task bound to `cpu`.
   Returns 0 on success, a request_firmware error code, or the positive
   leftover count from a partial parse. */
static int request_microcode_fw(int cpu, struct device *device)
{
	const char *fw_name = "amd-ucode/microcode_amd.bin";
	const struct firmware *firmware;
	int ret;

	/* We should bind the task to the CPU */
	BUG_ON(cpu != raw_smp_processor_id());

	ret = request_firmware(&firmware, fw_name, device);
	if (ret) {
		printk(KERN_ERR "microcode: ucode data file %s load failed\n", fw_name);
		return ret;
	}

	ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
			&get_ucode_fw);

	release_firmware(firmware);

	return ret;
}
/* Legacy /dev/cpu/microcode interface is intentionally unsupported on AMD;
   always fails.
   Fix: the two adjacent string literals concatenated to
   "...microcodeis not supported" -- added the missing space.  */
static int request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	printk(KERN_WARNING "microcode: AMD microcode update via /dev/cpu/microcode"
			" is not supported\n");
	return -1;
}
/* Drop the staged patch for `cpu` (vfree(NULL) is a no-op). */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	vfree(uci->mc);
	uci->mc = NULL;
}

/* Vendor ops vector handed to the arch-independent microcode core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};
/* Entry point called by the microcode core to obtain the AMD ops vector.
   Fix: "&microcode_amd_ops" had been corrupted to the mojibake
   "µcode_amd_ops"; restored the address-of expression.  */
struct microcode_ops * __init init_amd_microcode(void)
{
	return &microcode_amd_ops;
}
| gpl-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.2.A.1.100/external/webkit/Source/WebCore/bindings/js/JSAudioConstructor.cpp | 13 | 3576 | /*
* Copyright (C) 2007, 2008, 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#if ENABLE(VIDEO)
#include "JSAudioConstructor.h"
#include "HTMLAudioElement.h"
#include "JSHTMLAudioElement.h"
#include <runtime/Error.h>
using namespace JSC;
namespace WebCore {
// Class metadata; chains to the DOMConstructorWithDocument base class info.
const ClassInfo JSAudioConstructor::s_info = { "AudioConstructor", &DOMConstructorWithDocument::s_info, 0, 0 };

// Builds the "Audio" constructor object: its 'prototype' property is the
// HTMLAudioElement prototype, and 'length' is 1 for the single optional
// src argument.
JSAudioConstructor::JSAudioConstructor(ExecState* exec, JSDOMGlobalObject* globalObject)
    : DOMConstructorWithDocument(JSAudioConstructor::createStructure(globalObject->globalData(), globalObject->objectPrototype()), globalObject)
{
    ASSERT(inherits(&s_info));
    putDirect(exec->globalData(), exec->propertyNames().prototype, JSHTMLAudioElementPrototype::self(exec, globalObject), None);
    putDirect(exec->globalData(), exec->propertyNames().length, jsNumber(1), ReadOnly | DontDelete | DontEnum);
}
// Host function backing "new Audio(src)": creates an HTMLAudioElement bound
// to the document the constructor was created for, or throws a reference
// error if that document is gone.
static EncodedJSValue JSC_HOST_CALL constructAudio(ExecState* exec)
{
    JSAudioConstructor* jsConstructor = static_cast<JSAudioConstructor*>(exec->callee());
    Document* document = jsConstructor->document();
    if (!document)
        return throwVMError(exec, createReferenceError(exec, "Audio constructor associated document is unavailable"));

    // Calling toJS on the document causes the JS document wrapper to be
    // added to the window object. This is done to ensure that JSDocument::markChildren
    // will be called, which will cause the audio element to be marked if necessary.
    toJS(exec, jsConstructor->globalObject(), document);

    // FIXME: This converts an undefined argument to the string "undefined", but possibly we
    // should treat it as if no argument was passed instead, by checking the value of exec->argument
    // rather than looking at exec->argumentCount.
    String src;
    if (exec->argumentCount() > 0)
        src = ustringToString(exec->argument(0).toString(exec));
    return JSValue::encode(asObject(toJS(exec, jsConstructor->globalObject(),
        HTMLAudioElement::createForJSConstructor(document, src))));
}
// Tells JSC this object is constructible and which host function implements
// the [[Construct]] behavior.
ConstructType JSAudioConstructor::getConstructData(ConstructData& constructData)
{
    constructData.native.function = constructAudio;
    return ConstructTypeHost;
}
} // namespace WebCore
#endif // ENABLE(VIDEO)
| gpl-2.0 |
j-r0dd/motus_kernel | init/do_mounts_rd.c | 13 | 8640 |
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/minix_fs.h>
#include <linux/ext2_fs.h>
#include <linux/romfs_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/initrd.h>
#include <linux/string.h>
#include "do_mounts.h"
#include "../fs/squashfs/squashfs_fs.h"
#include <linux/decompress/generic.h>
#include <linux/decompress/bunzip2.h>
#include <linux/decompress/unlzma.h>
#include <linux/decompress/inflate.h>
int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */

/* handler for the "prompt_ramdisk=" kernel command line option */
static int __init prompt_ramdisk(char *str)
{
	rd_prompt = simple_strtol(str,NULL,0) & 1;
	return 1;
}
__setup("prompt_ramdisk=", prompt_ramdisk);
int __initdata rd_image_start;		/* starting block # of image */

/* handler for the "ramdisk_start=" kernel command line option */
static int __init ramdisk_start_setup(char *str)
{
	rd_image_start = simple_strtol(str,NULL,0);
	return 1;
}
__setup("ramdisk_start=", ramdisk_start_setup);
static int __init crd_load(int in_fd, int out_fd, decompress_fn deco);
/*
* This routine tries to find a RAM disk image to load, and returns the
* number of blocks to read for a non-compressed image, 0 if the image
* is a compressed image, and -1 if an image with the right magic
* numbers could not be found.
*
* We currently check for the following magic numbers:
* minix
* ext2
* romfs
* cramfs
* squashfs
* gzip
*/
/* Magic-byte table used by identify_ramdisk_image() to recognise a
   compressed image and pick the matching decompressor; terminated by the
   all-zero sentinel entry. */
static const struct compress_format {
	unsigned char magic[2];		/* first two bytes of the image */
	const char *name;
	decompress_fn decompressor;
} compressed_formats[] = {
#ifdef CONFIG_RD_GZIP
	{ {037, 0213}, "gzip", gunzip },
	{ {037, 0236}, "gzip", gunzip },
#endif
#ifdef CONFIG_RD_BZIP2
	{ {0x42, 0x5a}, "bzip2", bunzip2 },
#endif
#ifdef CONFIG_RD_LZMA
	{ {0x5d, 0x00}, "lzma", unlzma },
#endif
	{ {0, 0}, NULL, NULL }
};
/* Probe the image at `start_block` on `fd` and work out what it is.
   Returns the number of 1KiB blocks for an uncompressed filesystem image,
   0 for a compressed image (with *decompressor set), or -1 if nothing is
   recognised.  Always seeks fd back to start_block before returning. */
static int __init
identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
{
	const int size = 512;
	struct minix_super_block *minixsb;
	struct ext2_super_block *ext2sb;
	struct romfs_super_block *romfsb;
	struct cramfs_super *cramfsb;
	struct squashfs_super_block *squashfsb;
	int nblocks = -1;
	unsigned char *buf;
	const struct compress_format *cf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -1;

	/* every superblock pointer aliases the same probe buffer */
	minixsb = (struct minix_super_block *) buf;
	ext2sb = (struct ext2_super_block *) buf;
	romfsb = (struct romfs_super_block *) buf;
	cramfsb = (struct cramfs_super *) buf;
	squashfsb = (struct squashfs_super_block *) buf;
	memset(buf, 0xe5, size);

	/*
	 * Read block 0 to test for compressed kernel
	 */
	sys_lseek(fd, start_block * BLOCK_SIZE, 0);
	sys_read(fd, buf, size);

	/* compressed images are identified by their first two magic bytes */
	for (cf = compressed_formats; cf->decompressor; cf++) {
		if (buf[0] == cf->magic[0] && buf[1] == cf->magic[1]) {
			printk(KERN_NOTICE
			       "RAMDISK: %s image found at block %d\n",
			       cf->name, start_block);
			*decompressor = cf->decompressor;
			nblocks = 0;	/* 0 tells the caller to decompress */
			goto done;
		}
	}

	/* romfs is at block zero too */
	if (romfsb->word0 == ROMSB_WORD0 &&
	    romfsb->word1 == ROMSB_WORD1) {
		printk(KERN_NOTICE
		       "RAMDISK: romfs filesystem found at block %d\n",
		       start_block);
		nblocks = (ntohl(romfsb->size)+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
		goto done;
	}

	if (cramfsb->magic == CRAMFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: cramfs filesystem found at block %d\n",
		       start_block);
		nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
		goto done;
	}

	/* squashfs is at block zero too */
	if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: squashfs filesystem found at block %d\n",
		       start_block);
		nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1)
			 >> BLOCK_SIZE_BITS;
		goto done;
	}

	/*
	 * Read block 1 to test for minix and ext2 superblock
	 */
	sys_lseek(fd, (start_block+1) * BLOCK_SIZE, 0);
	sys_read(fd, buf, size);

	/* Try minix */
	if (minixsb->s_magic == MINIX_SUPER_MAGIC ||
	    minixsb->s_magic == MINIX_SUPER_MAGIC2) {
		printk(KERN_NOTICE
		       "RAMDISK: Minix filesystem found at block %d\n",
		       start_block);
		nblocks = minixsb->s_nzones << minixsb->s_log_zone_size;
		goto done;
	}

	/* Try ext2 */
	if (ext2sb->s_magic == cpu_to_le16(EXT2_SUPER_MAGIC)) {
		printk(KERN_NOTICE
		       "RAMDISK: ext2 filesystem found at block %d\n",
		       start_block);
		nblocks = le32_to_cpu(ext2sb->s_blocks_count) <<
			le32_to_cpu(ext2sb->s_log_block_size);
		goto done;
	}

	printk(KERN_NOTICE
	       "RAMDISK: Couldn't find valid RAM disk image starting at %d.\n",
	       start_block);

done:
	/* leave the descriptor positioned at the start of the image */
	sys_lseek(fd, start_block * BLOCK_SIZE, 0);
	kfree(buf);
	return nblocks;
}
/* Copy (or decompress) the ramdisk image at `from` into /dev/ram,
   prompting for additional floppies when the image spans several disks.
   Returns 1 on success, 0 on failure. */
int __init rd_load_image(char *from)
{
	int res = 0;
	int in_fd, out_fd;
	unsigned long rd_blocks, devblocks;
	int nblocks, i, disk;
	char *buf = NULL;
	unsigned short rotate = 0;
	decompress_fn decompressor = NULL;
#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
	char rotator[4] = { '|' , '/' , '-' , '\\' };
#endif

	out_fd = sys_open("/dev/ram", O_RDWR, 0);
	if (out_fd < 0)
		goto out;

	in_fd = sys_open(from, O_RDONLY, 0);
	if (in_fd < 0)
		goto noclose_input;

	/* nblocks: >0 = KiB to copy, 0 = compressed, <0 = not recognised */
	nblocks = identify_ramdisk_image(in_fd, rd_image_start, &decompressor);
	if (nblocks < 0)
		goto done;

	if (nblocks == 0) {
		if (crd_load(in_fd, out_fd, decompressor) == 0)
			goto successful_load;
		goto done;
	}

	/*
	 * NOTE NOTE: nblocks is not actually blocks but
	 * the number of kibibytes of data to load into a ramdisk.
	 * So any ramdisk block size that is a multiple of 1KiB should
	 * work when the appropriate ramdisk_blocksize is specified
	 * on the command line.
	 *
	 * The default ramdisk_blocksize is 1KiB and it is generally
	 * silly to use anything else, so make sure to use 1KiB
	 * blocksize while generating ext2fs ramdisk-images.
	 */
	if (sys_ioctl(out_fd, BLKGETSIZE, (unsigned long)&rd_blocks) < 0)
		rd_blocks = 0;
	else
		rd_blocks >>= 1;	/* 512-byte sectors -> KiB */

	if (nblocks > rd_blocks) {
		printk("RAMDISK: image too big! (%dKiB/%ldKiB)\n",
		       nblocks, rd_blocks);
		goto done;
	}

	/*
	 * OK, time to copy in the data
	 */
	if (sys_ioctl(in_fd, BLKGETSIZE, (unsigned long)&devblocks) < 0)
		devblocks = 0;
	else
		devblocks >>= 1;	/* 512-byte sectors -> KiB */

	/* an initrd image is never split across media */
	if (strcmp(from, "/initrd.image") == 0)
		devblocks = nblocks;

	if (devblocks == 0) {
		printk(KERN_ERR "RAMDISK: could not determine device size\n");
		goto done;
	}

	buf = kmalloc(BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "RAMDISK: could not allocate buffer\n");
		goto done;
	}

	printk(KERN_NOTICE "RAMDISK: Loading %dKiB [%ld disk%s] into ram disk... ",
		nblocks, ((nblocks-1)/devblocks)+1, nblocks>devblocks ? "s" : "");
	for (i = 0, disk = 1; i < nblocks; i++) {
		/* device exhausted: ask the user for the next floppy */
		if (i && (i % devblocks == 0)) {
			printk("done disk #%d.\n", disk++);
			rotate = 0;
			if (sys_close(in_fd)) {
				printk("Error closing the disk.\n");
				goto noclose_input;
			}
			change_floppy("disk #%d", disk);
			in_fd = sys_open(from, O_RDONLY, 0);
			if (in_fd < 0)  {
				printk("Error opening disk.\n");
				goto noclose_input;
			}
			printk("Loading disk #%d... ", disk);
		}
		sys_read(in_fd, buf, BLOCK_SIZE);
		sys_write(out_fd, buf, BLOCK_SIZE);
#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
		/* console spinner, one step every 16 KiB */
		if (!(i % 16)) {
			printk("%c\b", rotator[rotate & 0x3]);
			rotate++;
		}
#endif
	}
	printk("done.\n");

successful_load:
	res = 1;
done:
	sys_close(in_fd);
noclose_input:
	sys_close(out_fd);
out:
	kfree(buf);
	sys_unlink("/dev/ram");
	return res;
}
/* Load the root image from floppy/ramdisk device number n: create the
   /dev/root and /dev/ram nodes, optionally prompt the user, then copy the
   image in via rd_load_image(). */
int __init rd_load_disk(int n)
{
	if (rd_prompt)
		change_floppy("root floppy disk to be loaded into RAM disk");
	create_dev("/dev/root", ROOT_DEV);
	create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n));
	return rd_load_image("/dev/root");
}
static int exit_code;
static int decompress_error;		/* sticky flag: any decompress/write failure */
static int crd_infd, crd_outfd;		/* fds used by the decompressor callbacks */

/* Decompressor "fill" callback: read more compressed bytes from the input
   fd.  Returns byte count, 0 at EOF, or a negative error. */
static int __init compr_fill(void *buf, unsigned int len)
{
	int r = sys_read(crd_infd, buf, len);
	if (r < 0)
		printk(KERN_ERR "RAMDISK: error while reading compressed data");
	else if (r == 0)
		printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
	return r;
}
/* Decompressor "flush" callback: push `outcnt` decompressed bytes to the
   output fd.  Returns outcnt on success; on a short or failed write it
   latches decompress_error (reporting only the first failure) and
   returns -1. */
static int __init compr_flush(void *window, unsigned int outcnt)
{
	int written = sys_write(crd_outfd, window, outcnt);

	if (written == outcnt)
		return outcnt;

	if (decompress_error == 0)
		printk(KERN_ERR
		       "RAMDISK: incomplete write (%d != %d)\n",
		       written, outcnt);
	decompress_error = 1;
	return -1;
}
/* Decompressor error callback: log the message and latch both the global
   exit code and the sticky decompress_error flag. */
static void __init error(char *x)
{
	printk(KERN_ERR "%s\n", x);
	exit_code = 1;
	decompress_error = 1;
}
/* Decompress the whole image from in_fd to out_fd using decompressor
   `deco`, wiring up the module-level callback fds first.  Returns 0 on
   success, non-zero on any decompression or write failure. */
static int __init crd_load(int in_fd, int out_fd, decompress_fn deco)
{
	int status;

	crd_infd = in_fd;
	crd_outfd = out_fd;
	status = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error);
	return decompress_error ? 1 : status;
}
| gpl-2.0 |
nsubtil/tbb | src/test/test_priority_queue_node.cpp | 13 | 11084 | /*
Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation. Threading Building Blocks is
distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of
the GNU General Public License along with Threading Building Blocks; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
As a special exception, you may use this file as part of a free software library without
restriction. Specifically, if other files instantiate templates or use macros or inline
functions from this file, or you compile this file and link it with other files to produce
an executable, this file does not by itself cause the resulting executable to be covered
by the GNU General Public License. This exception does not however invalidate any other
reasons why the executable file might be covered by the GNU General Public License.
*/
// TO DO: Add overlapping put / receive tests
#include "harness.h"
#include "tbb/flow_graph.h"
#include "harness_checktype.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
#include "harness_graph.h"
#endif
#include <cstdio>
#define N 10
#define C 10
// Busy-wait until an item can be pulled from the queue into `value`.
template< typename T >
void spin_try_get( tbb::flow::priority_queue_node<T> &q, T &value ) {
    while ( !q.try_get(value) )
        continue;
}
// Check that `value` is the next item expected from the producer thread it
// encodes (thread id = value / N, sequence = value % N), then advance that
// thread's expected counter.
template< typename T >
void check_item( T* next_value, T &value ) {
    const int producer = value / N;
    const int seq = value % N;
    ASSERT( next_value[producer] == T(seq), NULL );
    ++next_value[producer];
}
// NativeParallelFor body: thread `tid` pushes the N values
// N*tid .. N*tid+N-1 into the queue, asserting each put is accepted.
template< typename T >
struct parallel_puts : NoAssign {

    tbb::flow::priority_queue_node<T> &my_q;

    parallel_puts( tbb::flow::priority_queue_node<T> &q ) : my_q(q) {}

    void operator()(int tid) const {
        for (int k = 0; k < N; ++k) {
            bool accepted = my_q.try_put( T(N*tid + k) );
            ASSERT( accepted == true, NULL );
        }
    }
};
// NativeParallelFor body: each thread pulls N items and asserts every later
// item has lower priority than the FIRST item that thread received (the
// reference value is deliberately not advanced inside the loop).
template< typename T >
struct parallel_gets : NoAssign {

    tbb::flow::priority_queue_node<T> &my_q;

    parallel_gets( tbb::flow::priority_queue_node<T> &q) : my_q(q) {}

    void operator()(int) const {
        T first;
        spin_try_get( my_q, first );
        for (int k = 0; k < N-1; ++k) {
            T item;
            spin_try_get( my_q, item );
            ASSERT(item < first, NULL);
        }
    }
};
// NativeParallelFor body: each thread alternates between dumping a chunk of
// about C values into the queue and draining about C values back out, for
// N values in total (values are tagged with the thread id via N*tid + j).
template< typename T >
struct parallel_put_get : NoAssign {

    tbb::flow::priority_queue_node<T> &my_q;

    parallel_put_get( tbb::flow::priority_queue_node<T> &q ) : my_q(q) {}

    void operator()(int tid) const {

        for ( int i = 0; i < N; i+=C ) {
            int j_end = ( N < i + C ) ? N : i + C;
            // dump about C values into the Q
            for ( int j = i; j < j_end; ++j ) {
                ASSERT( my_q.try_put( T (N*tid + j ) ) == true, NULL );
            }
            // receive about C values from the Q
            for ( int j = i; j < j_end; ++j ) {
                T v;
                spin_try_get( my_q, v );
            }
        }
    }
};
//
// Tests
//
// Item can be reserved, released, consumed ( single serial receiver )
//
template< typename T >
int test_reservation(int) {
    tbb::flow::graph g;

    // Simple tests
    tbb::flow::priority_queue_node<T> q(g);

    {
        T bogus_value(-1);

        // push three items; the queue hands them back highest-first
        q.try_put(T(1));
        q.try_put(T(2));
        q.try_put(T(3));
        g.wait_for_all();

        T v=bogus_value, w=bogus_value;

        // reserve exposes the front item without removing it; release
        // puts it back, so the same item can be reserved again
        ASSERT( q.try_reserve(v) == true, NULL );
        ASSERT( v == T(3), NULL );
        ASSERT( q.try_release() == true, NULL );
        v = bogus_value;
        g.wait_for_all();

        // reserve then consume removes the reserved item for good
        ASSERT( q.try_reserve(v) == true, NULL );
        ASSERT( v == T(3), NULL );
        ASSERT( q.try_consume() == true, NULL );
        v = bogus_value;
        g.wait_for_all();

        // plain get returns the next-highest item
        ASSERT( q.try_get(v) == true, NULL );
        ASSERT( v == T(2), NULL );
        v = bogus_value;
        g.wait_for_all();

        // while an item is reserved, further reserve/get attempts fail
        // and leave their output argument untouched
        ASSERT( q.try_reserve(v) == true, NULL );
        ASSERT( v == T(1), NULL );
        ASSERT( q.try_reserve(w) == false, NULL );
        ASSERT( w == bogus_value, NULL );
        ASSERT( q.try_get(w) == false, NULL );
        ASSERT( w == bogus_value, NULL );
        ASSERT( q.try_release() == true, NULL );
        v = bogus_value;
        g.wait_for_all();

        // consume the last item; the queue is then empty
        ASSERT( q.try_reserve(v) == true, NULL );
        ASSERT( v == T(1), NULL );
        ASSERT( q.try_consume() == true, NULL );
        v = bogus_value;
        g.wait_for_all();
        ASSERT( q.try_get(v) == false, NULL );
    }
    return 0;
}
//
// Tests
//
// multiple parallel senders, items in FIFO (relative to sender) order
// multiple parallel senders, multiple parallel receivers, items in FIFO order (relative to sender/receiver) and all items received
// * overlapped puts / gets
// * all puts finished before any gets
//
// Parallel suite: concurrent puts then an ordered serial drain, concurrent
// puts followed by concurrent gets, overlapped put/get traffic, a three-node
// chain, and the copy constructor.
template< typename T >
int test_parallel(int num_threads) {
    tbb::flow::graph g;
    tbb::flow::priority_queue_node<T> q(g);
    tbb::flow::priority_queue_node<T> q2(g);
    tbb::flow::priority_queue_node<T> q3(g);
    T bogus_value(-1);
    T j = bogus_value;

    // Concurrent puts, then a single thread drains: a max-priority queue
    // must yield num_threads*N-1 down to 0 in descending order.
    NativeParallelFor( num_threads, parallel_puts<T>(q) );
    for (int i = num_threads*N -1; i>=0; --i) {
        spin_try_get( q, j );
        ASSERT(j == i, NULL);
        j = bogus_value;
    }
    g.wait_for_all();
    ASSERT( q.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    // Concurrent puts followed by concurrent gets; queue ends empty.
    NativeParallelFor( num_threads, parallel_puts<T>(q) );
    g.wait_for_all();
    NativeParallelFor( num_threads, parallel_gets<T>(q) );
    g.wait_for_all();
    j = bogus_value;
    ASSERT( q.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    // Overlapped puts and gets from every thread.
    NativeParallelFor( num_threads, parallel_put_get<T>(q) );
    g.wait_for_all();
    j = bogus_value;
    ASSERT( q.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    // Push through the chain q -> q2 -> q3; only the tail should ever
    // hold items, and everything ends empty after the parallel gets.
    tbb::flow::make_edge( q, q2 );
    tbb::flow::make_edge( q2, q3 );
    NativeParallelFor( num_threads, parallel_puts<T>(q) );
    g.wait_for_all();
    NativeParallelFor( num_threads, parallel_gets<T>(q3) );
    g.wait_for_all();
    j = bogus_value;
    ASSERT( q.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );
    ASSERT( q2.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );
    ASSERT( q3.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    // test copy constructor
    ASSERT( q.remove_successor( q2 ) == true, NULL );
    NativeParallelFor( num_threads, parallel_puts<T>(q) );
    tbb::flow::priority_queue_node<T> q_copy(q);
    g.wait_for_all();
    j = bogus_value;
    // The copy starts empty; once registered as a successor it must
    // receive everything buffered in q, again in descending order.
    ASSERT( q_copy.try_get( j ) == false, NULL );
    ASSERT( q.register_successor( q_copy ) == true, NULL );
    for (int i = num_threads*N -1; i>=0; --i) {
        spin_try_get( q_copy, j );
        ASSERT(j == i, NULL);
        j = bogus_value;
    }
    g.wait_for_all();
    ASSERT( q.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );
    ASSERT( q_copy.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );
    return 0;
}
//
// Tests
//
// Predecessors cannot be registered
// Empty Q rejects item requests
// Single serial sender, items in FIFO order
// Chained Qs ( 2 & 3 ), single sender, items at last Q in FIFO order
//
// Serial suite: predecessor registration is rejected, an empty queue
// rejects gets, items drain in priority order, and 2- and 3-node chains
// forward items to the tail.
template< typename T >
int test_serial() {
    tbb::flow::graph g;
    T bogus_value(-1);

    tbb::flow::priority_queue_node<T> q(g);
    tbb::flow::priority_queue_node<T> q2(g);
    T j = bogus_value;

    //
    // Rejects attempts to add / remove predecessor
    // Rejects request from empty Q
    //
    ASSERT( q.register_predecessor( q2 ) == false, NULL );
    ASSERT( q.remove_predecessor( q2 ) == false, NULL );
    ASSERT( q.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    //
    // Simple puts and gets
    //
    for (int i = 0; i < N; ++i)
        ASSERT( q.try_put( T(i) ), NULL );
    // Max-priority queue: values come back in descending order.
    for (int i = N-1; i >=0; --i) {
        j = bogus_value;
        spin_try_get( q, j );
        ASSERT( i == j, NULL );
    }
    j = bogus_value;
    g.wait_for_all();
    ASSERT( q.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    // Two-node chain: items are forwarded q -> q2 and drained from q2.
    tbb::flow::make_edge( q, q2 );
    for (int i = 0; i < N; ++i)
        ASSERT( q.try_put( T(i) ), NULL );
    g.wait_for_all();
    for (int i = N-1; i >= 0; --i) {
        j = bogus_value;
        spin_try_get( q2, j );
        ASSERT( i == j, NULL );
    }
    j = bogus_value;
    g.wait_for_all();
    ASSERT( q.try_get( j ) == false, NULL );
    g.wait_for_all();
    ASSERT( q2.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    // After removing the edge, a new item stays in q.
    tbb::flow::remove_edge( q, q2 );
    ASSERT( q.try_put( 1 ) == true, NULL );
    g.wait_for_all();
    ASSERT( q2.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );
    g.wait_for_all();
    ASSERT( q.try_get( j ) == true, NULL );
    ASSERT( j == 1, NULL );

    // Three-node chain: items arrive at the tail (q3) in priority order.
    tbb::flow::priority_queue_node<T> q3(g);
    tbb::flow::make_edge( q, q2 );
    tbb::flow::make_edge( q2, q3 );
    for (int i = 0; i < N; ++i)
        ASSERT( q.try_put( T(i) ), NULL );
    g.wait_for_all();
    for (int i = N-1; i >= 0; --i) {
        j = bogus_value;
        spin_try_get( q3, j );
        ASSERT( i == j, NULL );
    }
    j = bogus_value;
    g.wait_for_all();
    ASSERT( q.try_get( j ) == false, NULL );
    g.wait_for_all();
    ASSERT( q2.try_get( j ) == false, NULL );
    g.wait_for_all();
    ASSERT( q3.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );

    // Removing the head edge again strands a new item in q.
    tbb::flow::remove_edge( q, q2 );
    ASSERT( q.try_put( 1 ) == true, NULL );
    g.wait_for_all();
    ASSERT( q2.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );
    g.wait_for_all();
    ASSERT( q3.try_get( j ) == false, NULL );
    ASSERT( j == bogus_value, NULL );
    g.wait_for_all();
    ASSERT( q.try_get( j ) == true, NULL );
    ASSERT( j == 1, NULL );
    return 0;
}
// Entry point: run the serial, reservation, and parallel suites for 2-4
// threads, then (preview-feature builds only) the reset/extract suites.
int TestMain() {
    tbb::tick_count start = tbb::tick_count::now(), stop;
    for (int p = 2; p <= 4; ++p) {
        tbb::task_scheduler_init init(p);
        test_serial<int>();
        test_reservation<int>(p);
        // check_type counts constructions/destructions to catch leaks.
        test_reservation<check_type<int> >(p);
        test_parallel<int>(p);
    }
    stop = tbb::tick_count::now();
    REMARK("Priority_Queue_Node Time=%6.6f\n", (stop-start).seconds());
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
    REMARK("Testing resets\n");
    test_resets<int,tbb::flow::priority_queue_node<int> >();
    test_resets<float,tbb::flow::priority_queue_node<float> >();
    test_buffer_extract<tbb::flow::priority_queue_node<int> >().run_tests();
#endif
    return Harness::Done;
}
| gpl-2.0 |
sdwuyawen/linux2.6.21_helper2416 | drivers/scsi/ibmvscsi/ibmvstgt.c | 13 | 23028 | /*
* IBM eServer i/pSeries Virtual SCSI Target Driver
* Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
* Santiago Leon (santil@us.ibm.com) IBM Corp.
* Linda Xie (lxie@us.ibm.com) IBM Corp.
*
* Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <scsi/libsrp.h>
#include <asm/hvcall.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/vio.h>
#include "ibmvscsi.h"
#define INITIAL_SRP_LIMIT 16
#define DEFAULT_MAX_SECTORS 512
#define TGT_NAME "ibmvstgt"
/*
* Hypervisor calls.
*/
/*
 * Hypervisor call wrappers.  Note: the h_reg_crq/h_free_crq macros
 * previously ended in a semicolon, which expands to a double semicolon
 * at call sites and breaks use inside an if/else without braces; the
 * trailing semicolons have been dropped so all four behave alike.
 */
#define h_copy_rdma(l, sa, sb, da, db) \
			plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
#define h_send_crq(ua, l, h) \
			plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
#define h_reg_crq(ua, tok, sz) \
			plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
#define h_free_crq(ua) \
			plpar_hcall_norets(H_FREE_CRQ, ua)
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
struct vio_port {
struct vio_dev *dma_dev;
struct crq_queue crq_queue;
struct work_struct crq_work;
unsigned long liobn;
unsigned long riobn;
struct srp_target *target;
};
static struct workqueue_struct *vtgtd;
/*
* These are fixed for the system and come from the Open Firmware device tree.
* We just store them here to save getting them every time.
*/
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
/* Recover the vio_port stashed in the target's driver-private field. */
static struct vio_port *target_to_port(struct srp_target *target)
{
    struct vio_port *port = (struct vio_port *) target->ldata;

    return port;
}
/* View an IU entry's backing buffer as a viosrp IU. */
static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
{
    void *buf = iue->sbuf->buf;

    return (union viosrp_iu *) buf;
}
/*
 * Copy a response IU into the client's buffer via RDMA, then post a CRQ
 * entry telling the client that the response is ready.  Returns the CRQ
 * send error if that fails, otherwise the RDMA copy result (0 = success).
 */
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
    struct srp_target *target = iue->target;
    struct vio_port *vport = target_to_port(target);
    long rc, rc1;
    union {
        struct viosrp_crq cooked;
        uint64_t raw[2];
    } crq;

    /* First copy the SRP */
    rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
             vport->riobn, iue->remote_token);
    if (rc)
        eprintk("Error %ld transferring data\n", rc);

    crq.cooked.valid = 0x80;
    crq.cooked.format = format;
    crq.cooked.reserved = 0x00;
    crq.cooked.timeout = 0x00;
    crq.cooked.IU_length = length;
    crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

    /* NOTE(review): status is 0x99 on RDMA success and 0x00 on failure,
     * which reads backwards relative to the "non-zero" comment below -
     * confirm against the VIOSRP CRQ protocol before changing. */
    if (rc == 0)
        crq.cooked.status = 0x99;	/* Just needs to be non-zero */
    else
        crq.cooked.status = 0x00;

    rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
    if (rc1) {
        eprintk("%ld sending response\n", rc1);
        return rc1;
    }

    return rc;
}
#define SRP_RSP_SENSE_DATA_LEN 18
/*
 * Build and send an SRP_RSP for the given IU.  If @status is non-zero,
 * sense data is attached: copied from @sc when a command is supplied,
 * otherwise a fixed-format sense block is synthesized from @status (sense
 * key) and @asc (additional sense code).
 */
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
            unsigned char status, unsigned char asc)
{
    union viosrp_iu *iu = vio_iu(iue);
    /* The tag lives in the request we are about to overwrite; save it. */
    uint64_t tag = iu->srp.rsp.tag;

    /* If the linked bit is on and status is good */
    if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
        status = 0x10;

    memset(iu, 0, sizeof(struct srp_rsp));
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = 1;
    iu->srp.rsp.tag = tag;

    if (test_bit(V_DIOVER, &iue->flags))
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

    iu->srp.rsp.data_in_res_cnt = 0;
    iu->srp.rsp.data_out_res_cnt = 0;

    iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;

    iu->srp.rsp.resp_data_len = 0;
    iu->srp.rsp.status = status;
    if (status) {
        uint8_t *sense = iu->srp.rsp.data;

        if (sc) {
            /* Forward the real sense buffer of the failed command. */
            iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
            iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
            memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
        } else {
            /* No command: synthesize fixed-format sense data. */
            iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
            iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
            iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

            /* Valid bit and 'current errors' */
            sense[0] = (0x1 << 7 | 0x70);
            /* Sense key */
            sense[2] = status;
            /* Additional sense length */
            sense[7] = 0xa;	/* 10 bytes */
            /* Additional sense code */
            sense[12] = asc;
        }
    }

    send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
        VIOSRP_SRP_FORMAT);

    return 0;
}
/*
 * Submit every not-yet-flying command IU on the target's cmd_queue to the
 * SCSI target core.  The V_FLYING bit marks entries already submitted.
 * The list is rescanned from the head (goto retry) after each submission
 * because the lock is dropped around srp_cmd_queue(), which invalidates
 * the list iterator.
 */
static void handle_cmd_queue(struct srp_target *target)
{
    struct Scsi_Host *shost = target->shost;
    struct iu_entry *iue;
    struct srp_cmd *cmd;
    unsigned long flags;
    int err;

retry:
    spin_lock_irqsave(&target->lock, flags);

    list_for_each_entry(iue, &target->cmd_queue, ilist) {
        if (!test_and_set_bit(V_FLYING, &iue->flags)) {
            spin_unlock_irqrestore(&target->lock, flags);
            cmd = iue->sbuf->buf;
            err = srp_cmd_queue(shost, cmd, iue, 0);
            if (err) {
                eprintk("cannot queue cmd %p %d\n", cmd, err);
                srp_iu_put(iue);
            }
            goto retry;
        }
    }

    spin_unlock_irqrestore(&target->lock, flags);
}
/*
 * Copy up to @rest bytes between the local scatterlist (@sg, @nsg entries)
 * and the client's remote memory descriptors (@md, @nmd entries) using the
 * H_COPY_RDMA hcall, in @dir direction.
 *
 * Fixes over the previous version:
 *  - the scatterlist bounds check used "sidx > nsg" and dereferenced
 *    sg_dma_address(sg + sidx) *before* checking, allowing a read one
 *    entry past the end; valid indices are 0 .. nsg-1.
 *  - "rest -= mlen" was a no-op because the inner loop counts mlen down
 *    to zero; the bytes actually copied for a descriptor are in mdone.
 */
static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
             struct srp_direct_buf *md, int nmd,
             enum dma_data_direction dir, unsigned int rest)
{
    struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
    struct srp_target *target = iue->target;
    struct vio_port *vport = target_to_port(target);
    dma_addr_t token;
    long err;
    unsigned int done = 0;
    int i, sidx, soff;

    sidx = soff = 0;
    token = sg_dma_address(sg + sidx);

    for (i = 0; i < nmd && rest; i++) {
        unsigned int mdone, mlen;

        mlen = min(rest, md[i].len);
        for (mdone = 0; mlen;) {
            int slen = min(sg_dma_len(sg + sidx) - soff, mlen);

            if (dir == DMA_TO_DEVICE)
                err = h_copy_rdma(slen,
                          vport->riobn,
                          md[i].va + mdone,
                          vport->liobn,
                          token + soff);
            else
                err = h_copy_rdma(slen,
                          vport->liobn,
                          token + soff,
                          vport->riobn,
                          md[i].va + mdone);

            if (err != H_SUCCESS) {
                eprintk("rdma error %d %d\n", dir, slen);
                goto out;
            }

            mlen -= slen;
            mdone += slen;
            soff += slen;
            done += slen;

            if (soff == sg_dma_len(sg + sidx)) {
                sidx++;
                soff = 0;
                /* Check bounds before touching the next entry. */
                if (sidx >= nsg) {
                    if (mlen) {
                        eprintk("out of sg %p %d %d\n",
                            iue, sidx, nsg);
                        goto out;
                    }
                    break;	/* copy finished exactly at the end */
                }
                token = sg_dma_address(sg + sidx);
            }
        }
        /* mdone = bytes copied for this descriptor (mlen is now 0). */
        rest -= mdone;
    }
out:
    return 0;
}
/*
 * tgt-core data-transfer hook: move the command's data via ibmvstgt_rdma,
 * then signal completion through @done.
 */
static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
                  void (*done)(struct scsi_cmnd *))
{
    struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
    int err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd,
                    ibmvstgt_rdma, 1, 1);

    done(sc);
    return err;
}
/*
 * Completion callback from the SCSI target core: unlink the IU from the
 * command queue, send the SRP response (HARDWARE_ERROR sense on any
 * non-GOOD result), then release the IU reference.
 */
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
                 void (*done)(struct scsi_cmnd *))
{
    unsigned long flags;
    struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
    struct srp_target *target = iue->target;

    dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

    spin_lock_irqsave(&target->lock, flags);
    list_del(&iue->ilist);
    spin_unlock_irqrestore(&target->lock, flags);

    if (sc->result != SAM_STAT_GOOD) {
        eprintk("operation failed %p %d %x\n",
            iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
        send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
    } else
        send_rsp(iue, sc, NO_SENSE, 0x00);

    done(sc);
    srp_iu_put(iue);
    return 0;
}
/*
 * Exchange adapter-info MAD payloads with the client: pull the client's
 * info block from @remote_buffer (logged only), then RDMA our own info
 * (SRP version, partition name/number, max transfer size) back into it.
 *
 * Returns 0 on success, 1 on failure; the caller stores this directly as
 * the MAD status.
 */
int send_adapter_info(struct iu_entry *iue,
              dma_addr_t remote_buffer, uint16_t length)
{
    struct srp_target *target = iue->target;
    struct vio_port *vport = target_to_port(target);
    struct Scsi_Host *shost = target->shost;
    dma_addr_t data_token;
    struct mad_adapter_info_data *info;
    int err;

    info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
                  GFP_KERNEL);
    if (!info) {
        eprintk("bad dma_alloc_coherent %p\n", target);
        return 1;
    }

    /* Get remote info */
    err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
              vport->liobn, data_token);
    if (err == H_SUCCESS) {
        dprintk("Client connect: %s (%d)\n",
            info->partition_name, info->partition_number);
    }

    memset(info, 0, sizeof(*info));

    strcpy(info->srp_version, "16.a");
    strncpy(info->partition_name, partition_name,
        sizeof(info->partition_name));
    info->partition_number = partition_number;
    info->mad_version = 1;
    info->os_type = 2;
    /* Largest transfer we accept, in bytes (max_sectors * 512). */
    info->port_max_txu[0] = shost->hostt->max_sectors << 9;

    /* Send our info to remote */
    err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
              vport->riobn, remote_buffer);

    dma_free_coherent(target->dev, sizeof(*info), info, data_token);

    if (err != H_SUCCESS) {
        eprintk("Error sending adapter info %d\n", err);
        return 1;
    }

    return 0;
}
/*
 * Answer an SRP_LOGIN_REQ with a login response advertising our IU sizes
 * and the supported (direct + indirect) data-buffer formats.
 */
static void process_login(struct iu_entry *iue)
{
    union viosrp_iu *iu = vio_iu(iue);
    struct srp_login_rsp *rsp = &iu->srp.login_rsp;
    /* Save the tag before the request is overwritten below. */
    uint64_t tag = iu->srp.rsp.tag;

    /* TODO handle case that requested size is wrong and
     * buffer format is wrong
     */
    memset(iu, 0, sizeof(struct srp_login_rsp));
    rsp->opcode = SRP_LOGIN_RSP;
    rsp->req_lim_delta = INITIAL_SRP_LIMIT;
    rsp->tag = tag;
    rsp->max_it_iu_len = sizeof(union srp_iu);
    rsp->max_ti_iu_len = sizeof(union srp_iu);
    /* direct and indirect */
    rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

    send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
/* Append a received SRP_CMD IU to the target's command queue. */
static inline void queue_cmd(struct iu_entry *iue)
{
    unsigned long flags;
    struct srp_target *target = iue->target;

    spin_lock_irqsave(&target->lock, flags);
    list_add_tail(&iue->ilist, &target->cmd_queue);
    spin_unlock_irqrestore(&target->lock, flags);
}
/*
 * Translate an SRP task-management request into the SCSI-core TMF code
 * and forward it; unsupported functions are answered here with an
 * ILLEGAL_REQUEST response.  Returns 0 when the request was forwarded
 * (the IU is released later by ibmvstgt_tsk_mgmt_response()), non-zero
 * when it was answered immediately and the caller should free the IU.
 */
static int process_tsk_mgmt(struct iu_entry *iue)
{
    union viosrp_iu *iu = vio_iu(iue);
    int fn;

    dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);

    switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
    case SRP_TSK_ABORT_TASK:
        fn = ABORT_TASK;
        break;
    case SRP_TSK_ABORT_TASK_SET:
        fn = ABORT_TASK_SET;
        break;
    case SRP_TSK_CLEAR_TASK_SET:
        fn = CLEAR_TASK_SET;
        break;
    case SRP_TSK_LUN_RESET:
        fn = LOGICAL_UNIT_RESET;
        break;
    case SRP_TSK_CLEAR_ACA:
        fn = CLEAR_ACA;
        break;
    default:
        fn = 0;
    }
    if (fn)
        scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
                      iu->srp.tsk_mgmt.task_tag,
                      (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
                      iue);
    else
        /* 0x20: invalid command operation code */
        send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);

    return !fn;
}
/*
 * Handle a management datagram (MAD) IU from the client.  Only the
 * adapter-info request is fully implemented; the others are answered
 * with a failure status or just logged.  Always returns 1 so the caller
 * releases the IU entry.
 */
static int process_mad_iu(struct iu_entry *iue)
{
    union viosrp_iu *iu = vio_iu(iue);
    struct viosrp_adapter_info *info;
    struct viosrp_host_config *conf;

    switch (iu->mad.empty_iu.common.type) {
    case VIOSRP_EMPTY_IU_TYPE:
        eprintk("%s\n", "Unsupported EMPTY MAD IU");
        break;
    case VIOSRP_ERROR_LOG_TYPE:
        eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
        iu->mad.error_log.common.status = 1;
        send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
        break;
    case VIOSRP_ADAPTER_INFO_TYPE:
        info = &iu->mad.adapter_info;
        info->common.status = send_adapter_info(iue, info->buffer,
                            info->common.length);
        send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
        break;
    case VIOSRP_HOST_CONFIG_TYPE:
        conf = &iu->mad.host_config;
        conf->common.status = 1;
        send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
        break;
    default:
        /* Log the MAD type that was actually switched on; the old code
         * printed the unrelated SRP opcode field of the union. */
        eprintk("Unknown type %u\n", iu->mad.empty_iu.common.type);
    }

    return 1;
}
/*
 * Dispatch one SRP IU by opcode.  Returns non-zero when the caller should
 * release the IU entry; SRP_CMD transfers ownership to the command queue
 * and returns 0.
 */
static int process_srp_iu(struct iu_entry *iue)
{
    union viosrp_iu *iu = vio_iu(iue);
    int done = 1;
    u8 opcode = iu->srp.rsp.opcode;

    switch (opcode) {
    case SRP_LOGIN_REQ:
        process_login(iue);
        break;
    case SRP_TSK_MGMT:
        done = process_tsk_mgmt(iue);
        break;
    case SRP_CMD:
        queue_cmd(iue);
        done = 0;	/* command queue now owns the IU */
        break;
    case SRP_LOGIN_RSP:
    case SRP_I_LOGOUT:
    case SRP_T_LOGOUT:
    case SRP_RSP:
    case SRP_CRED_REQ:
    case SRP_CRED_RSP:
    case SRP_AER_REQ:
    case SRP_AER_RSP:
        /* Valid SRP opcodes that a target never expects to receive. */
        eprintk("Unsupported type %u\n", opcode);
        break;
    default:
        eprintk("Unknown type %u\n", opcode);
    }

    return done;
}
/*
 * Pull the IU payload described by @crq out of client memory into a local
 * IU entry and dispatch it by CRQ format (MAD vs SRP).  The entry is
 * released here unless the handler took ownership (queued SRP_CMD).
 */
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
    struct vio_port *vport = target_to_port(target);
    struct iu_entry *iue;
    long err, done;

    iue = srp_iu_get(target);
    if (!iue) {
        eprintk("Error getting IU from pool, %p\n", target);
        return;
    }

    iue->remote_token = crq->IU_data_ptr;

    err = h_copy_rdma(crq->IU_length, vport->riobn,
              iue->remote_token, vport->liobn, iue->sbuf->dma);

    if (err != H_SUCCESS) {
        eprintk("%ld transferring data error %p\n", err, iue);
        done = 1;
        goto out;
    }

    if (crq->format == VIOSRP_MAD_FORMAT)
        done = process_mad_iu(iue);
    else
        done = process_srp_iu(iue);
out:
    /* Handlers return 0 when they keep ownership of the IU. */
    if (done)
        srp_iu_put(iue);
}
/*
 * CRQ interrupt handler: mask further interrupts and defer all CRQ
 * processing to the workqueue (handle_crq re-enables interrupts).
 */
static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
{
    struct srp_target *target = data;
    struct vio_port *vport = target_to_port(target);

    vio_disable_interrupts(vport->dma_dev);
    queue_work(vtgtd, &vport->crq_work);

    return IRQ_HANDLED;
}
/*
 * Allocate a one-page CRQ, DMA-map it, register it with the hypervisor
 * (freeing and retrying if a stale registration was left over, e.g. after
 * kexec), install the interrupt handler and send the initialization CRQ
 * to the client.  Unwinds in reverse order on failure; always reports
 * -ENOMEM to the caller on error.
 */
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
    int err;
    struct vio_port *vport = target_to_port(target);

    queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
    if (!queue->msgs)
        goto malloc_failed;
    queue->size = PAGE_SIZE / sizeof(*queue->msgs);

    queue->msg_token = dma_map_single(target->dev, queue->msgs,
                      queue->size * sizeof(*queue->msgs),
                      DMA_BIDIRECTIONAL);

    if (dma_mapping_error(queue->msg_token))
        goto map_failed;

    err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
            PAGE_SIZE);

    /* If the adapter was left active for some reason (like kexec)
     * try freeing and re-registering
     */
    if (err == H_RESOURCE) {
        do {
            err = h_free_crq(vport->dma_dev->unit_address);
        } while (err == H_BUSY || H_IS_LONG_BUSY(err));

        err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
                PAGE_SIZE);
    }

    /* NOTE(review): "2" is accepted as a successful h_reg_crq return -
     * presumably H_CLOSED (other end not yet connected); confirm and
     * use the symbolic constant. */
    if (err != H_SUCCESS && err != 2) {
        eprintk("Error 0x%x opening virtual adapter\n", err);
        goto reg_crq_failed;
    }

    err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
              IRQF_DISABLED, "ibmvstgt", target);
    if (err)
        goto req_irq_failed;

    vio_enable_interrupts(vport->dma_dev);

    /* Tell the client partition we are ready (initialization CRQ). */
    h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);

    queue->cur = 0;
    spin_lock_init(&queue->lock);

    return 0;

req_irq_failed:
    do {
        err = h_free_crq(vport->dma_dev->unit_address);
    } while (err == H_BUSY || H_IS_LONG_BUSY(err));
reg_crq_failed:
    dma_unmap_single(target->dev, queue->msg_token,
             queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
    free_page((unsigned long) queue->msgs);
malloc_failed:
    return -ENOMEM;
}
/*
 * Tear down a CRQ: detach the interrupt, ask the hypervisor to free the
 * queue (retrying while busy), then unmap and free the page backing it.
 */
static void crq_queue_destroy(struct srp_target *target)
{
    struct vio_port *vport = target_to_port(target);
    struct crq_queue *queue = &vport->crq_queue;
    int err;

    free_irq(vport->dma_dev->irq, target);
    do {
        err = h_free_crq(vport->dma_dev->unit_address);
    } while (err == H_BUSY || H_IS_LONG_BUSY(err));

    dma_unmap_single(target->dev, queue->msg_token,
             queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

    free_page((unsigned long) queue->msgs);
}
/*
 * Dispatch one CRQ entry: 0xC0 = initialization handshake, 0xFF =
 * transport event (ignored), 0x80 = real payload carrying an SRP or MAD
 * IU.  Anything else is logged and dropped.
 */
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
    struct vio_port *vport = target_to_port(target);
    dprintk("%x %x\n", crq->valid, crq->format);

    switch (crq->valid) {
    case 0xC0:
        /* initialization */
        switch (crq->format) {
        case 0x01:
            /* Init request from client: answer with init-complete. */
            h_send_crq(vport->dma_dev->unit_address,
                   0xC002000000000000, 0);
            break;
        case 0x02:
            /* Init-complete from the client; nothing to do. */
            break;
        default:
            eprintk("Unknown format %u\n", crq->format);
        }
        break;
    case 0xFF:
        /* transport event */
        break;
    case 0x80:
        /* real payload */
        switch (crq->format) {
        case VIOSRP_SRP_FORMAT:
        case VIOSRP_MAD_FORMAT:
            process_iu(crq, target);
            break;
        case VIOSRP_OS400_FORMAT:
        case VIOSRP_AIX_FORMAT:
        case VIOSRP_LINUX_FORMAT:
        case VIOSRP_INLINE_FORMAT:
            eprintk("Unsupported format %u\n", crq->format);
            break;
        default:
            eprintk("Unknown format %u\n", crq->format);
        }
        break;
    default:
        eprintk("unknown message type 0x%02x!?\n", crq->valid);
    }
}
/*
 * Pop the next valid CRQ entry, or return NULL if the current slot is
 * empty.  The queue lock serializes the cursor update.
 */
static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
{
    unsigned long flags;
    struct viosrp_crq *crq = NULL;

    spin_lock_irqsave(&queue->lock, flags);
    if (queue->msgs[queue->cur].valid & 0x80) {
        crq = &queue->msgs[queue->cur];
        if (++queue->cur == queue->size)
            queue->cur = 0;
    }
    spin_unlock_irqrestore(&queue->lock, flags);

    return crq;
}
/*
 * Workqueue handler: drain the CRQ.  Once the queue looks empty,
 * interrupts are re-enabled and the queue checked one more time to close
 * the race with an entry that arrived in between; if something did
 * arrive, interrupts are masked again and the loop repeats.  Finally,
 * SRP commands accumulated by process_iu() are submitted to the target
 * core.
 */
static void handle_crq(struct work_struct *work)
{
    struct vio_port *vport = container_of(work, struct vio_port, crq_work);
    struct srp_target *target = vport->target;
    struct viosrp_crq *crq;
    int done = 0;

    while (!done) {
        while ((crq = next_crq(&vport->crq_queue)) != NULL) {
            process_crq(crq, target);
            crq->valid = 0x00;	/* hand the slot back */
        }

        vio_enable_interrupts(vport->dma_dev);

        crq = next_crq(&vport->crq_queue);
        if (crq) {
            vio_disable_interrupts(vport->dma_dev);
            process_crq(crq, target);
            crq->valid = 0x00;
        } else
            done = 1;
    }

    handle_cmd_queue(target);
}
/*
 * SCSI error-handling abort: unlink the command's IU from the target's
 * command queue and drop its reference.  No SRP response is sent here.
 */
static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
{
    unsigned long flags;
    struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
    struct srp_target *target = iue->target;

    dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

    spin_lock_irqsave(&target->lock, flags);
    list_del(&iue->ilist);
    spin_unlock_irqrestore(&target->lock, flags);

    srp_iu_put(iue);

    return 0;
}
/*
 * Task-management completion callback from the tgt core.  @mid is the
 * iu_entry pointer passed to scsi_tgt_tsk_mgmt_request().  Sends the SRP
 * response (ABORTED_COMMAND sense for a failed abort) and frees the IU.
 */
static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
{
    struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
    union viosrp_iu *iu = vio_iu(iue);
    unsigned char status, asc;

    eprintk("%p %d\n", iue, result);
    status = NO_SENSE;
    asc = 0;

    switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
    case SRP_TSK_ABORT_TASK:
        asc = 0x14;	/* additional sense code reported for aborts */
        if (result)
            status = ABORTED_COMMAND;
        break;
    default:
        break;
    }

    send_rsp(iue, NULL, status, asc);
    srp_iu_put(iue);

    return 0;
}
/* sysfs: report the system-id string cached from the device tree. */
static ssize_t system_id_show(struct class_device *cdev, char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
}
/* sysfs: report the partition number cached from the device tree (hex). */
static ssize_t partition_number_show(struct class_device *cdev, char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
}
/* sysfs: report this adapter's vio unit address (hex). */
static ssize_t unit_address_show(struct class_device *cdev, char *buf)
{
    struct Scsi_Host *shost = class_to_shost(cdev);
    struct srp_target *target = host_to_srp_target(shost);
    struct vio_port *vport = target_to_port(target);
    return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
}
/* Read-only sysfs attributes exposing adapter identity. */
static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct class_device_attribute *ibmvstgt_attrs[] = {
    &class_device_attr_system_id,
    &class_device_attr_partition_number,
    &class_device_attr_unit_address,
    NULL,
};

/* Host template for the SCSI target core; the transfer_* hooks carry
 * command responses and data back over the CRQ/RDMA path. */
static struct scsi_host_template ibmvstgt_sht = {
    .name = TGT_NAME,
    .module = THIS_MODULE,
    .can_queue = INITIAL_SRP_LIMIT,
    .sg_tablesize = SG_ALL,
    .use_clustering = DISABLE_CLUSTERING,
    .max_sectors = DEFAULT_MAX_SECTORS,
    .transfer_response = ibmvstgt_cmd_done,
    .transfer_data = ibmvstgt_transfer_data,
    .eh_abort_handler = ibmvstgt_eh_abort_handler,
    .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
    .shost_attrs = ibmvstgt_attrs,
    .proc_name = TGT_NAME,
};
/*
 * vio bus probe: allocate the port/host/target trio, look up the DMA
 * window from the device tree, bring up the CRQ and register the SCSI
 * host.  Resources are unwound in reverse order on failure.
 */
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
    struct Scsi_Host *shost;
    struct srp_target *target;
    struct vio_port *vport;
    unsigned int *dma, dma_size;
    int err = -ENOMEM;

    vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
    if (!vport)
        return err;
    shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
    if (!shost)
        goto free_vport;
    err = scsi_tgt_alloc_queue(shost);
    if (err)
        goto put_host;

    /* Wire up the back-pointers between host, target and port. */
    target = host_to_srp_target(shost);
    target->shost = shost;
    vport->dma_dev = dev;
    target->ldata = vport;
    vport->target = target;
    err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
                   SRP_MAX_IU_LEN);
    if (err)
        goto put_host;

    dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
                         &dma_size);
    if (!dma || dma_size != 40) {
        eprintk("Couldn't get window property %d\n", dma_size);
        err = -EIO;
        goto free_srp_target;
    }
    /* Local/remote IO bus numbers at fixed offsets within the 40-byte
     * "ibm,my-dma-window" property. */
    vport->liobn = dma[0];
    vport->riobn = dma[5];

    INIT_WORK(&vport->crq_work, handle_crq);

    err = crq_queue_create(&vport->crq_queue, target);
    if (err)
        goto free_srp_target;

    err = scsi_add_host(shost, target->dev);
    if (err)
        goto destroy_queue;
    return 0;

destroy_queue:
    crq_queue_destroy(target);
free_srp_target:
    srp_target_free(target);
put_host:
    scsi_host_put(shost);
free_vport:
    kfree(vport);
    return err;
}
/*
 * Device teardown: stop the CRQ, remove the SCSI host and free the
 * target resources.
 * NOTE(review): the CRQ is destroyed before scsi_remove_host(); confirm
 * no in-flight commands can still reference the CRQ at that point.
 */
static int ibmvstgt_remove(struct vio_dev *dev)
{
    struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
    struct Scsi_Host *shost = target->shost;
    struct vio_port *vport = target->ldata;

    crq_queue_destroy(target);
    scsi_remove_host(shost);
    scsi_tgt_free_queue(shost);
    srp_target_free(target);
    kfree(vport);
    scsi_host_put(shost);
    return 0;
}
static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
{"v-scsi-host", "IBM,v-scsi-host"},
{"",""}
};
MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
static struct vio_driver ibmvstgt_driver = {
.id_table = ibmvstgt_device_table,
.probe = ibmvstgt_probe,
.remove = ibmvstgt_remove,
.driver = {
.name = "ibmvscsis",
.owner = THIS_MODULE,
}
};
/*
 * Cache identifying properties from the Open Firmware device-tree root:
 * system id ("model-systemid"), partition name and partition number.
 * Called once at module init; the values feed the adapter-info MAD and
 * the sysfs attributes.
 *
 * Returns 0 on success, -ENOENT if the root node cannot be found.
 */
static int get_system_info(void)
{
    struct device_node *rootdn;
    const char *id, *model, *name;
    unsigned int *num;

    rootdn = find_path_device("/");
    if (!rootdn)
        return -ENOENT;

    model = get_property(rootdn, "model", NULL);
    id = get_property(rootdn, "system-id", NULL);
    if (model && id)
        snprintf(system_id, sizeof(system_id), "%s-%s", model, id);

    name = get_property(rootdn, "ibm,partition-name", NULL);
    if (name) {
        /* strncpy() does not NUL-terminate when the source is at
         * least as long as the destination; copy one byte less and
         * terminate explicitly. */
        strncpy(partition_name, name, sizeof(partition_name) - 1);
        partition_name[sizeof(partition_name) - 1] = '\0';
    }

    num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
    if (num)
        partition_number = *num;

    return 0;
}
/*
 * Module init: create the CRQ-handling workqueue, cache identity info
 * from the device tree, then register the vio driver.  The workqueue is
 * destroyed again on any failure.
 */
static int ibmvstgt_init(void)
{
    int err = -ENOMEM;

    printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");

    vtgtd = create_workqueue("ibmvtgtd");
    if (!vtgtd)
        return err;

    err = get_system_info();
    if (err)
        goto destroy_wq;

    err = vio_register_driver(&ibmvstgt_driver);
    if (err)
        goto destroy_wq;

    return 0;

destroy_wq:
    destroy_workqueue(vtgtd);
    return err;
}
/*
 * Module unload.  Unregister the vio driver first so no device interrupt
 * can queue new CRQ work, then tear down the workqueue.  The previous
 * order (destroy_workqueue() before vio_unregister_driver()) left a
 * window in which a still-registered adapter's interrupt handler could
 * queue work onto an already-destroyed workqueue.
 */
static void ibmvstgt_exit(void)
{
    printk("Unregister IBM virtual SCSI driver\n");

    vio_unregister_driver(&ibmvstgt_driver);
    destroy_workqueue(vtgtd);
}
MODULE_DESCRIPTION("IBM Virtual SCSI Target");
MODULE_AUTHOR("Santiago Leon");
MODULE_LICENSE("GPL");
module_init(ibmvstgt_init);
module_exit(ibmvstgt_exit);
| gpl-2.0 |
gablg1/ubuntu-vivid-docker-cr | lib/seq_buf.c | 13 | 9046 | /*
* seq_buf.c
*
* Copyright (C) 2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
* The seq_buf is a handy tool that allows you to pass a descriptor around
* to a buffer that other functions can write to. It is similar to the
* seq_file functionality but has some differences.
*
* To use it, the seq_buf must be initialized with seq_buf_init().
* This will set up the counters within the descriptor. You can call
* seq_buf_init() more than once to reset the seq_buf to start
* from scratch.
*/
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/seq_buf.h>
/**
* seq_buf_can_fit - can the new data fit in the current buffer?
* @s: the seq_buf descriptor
* @len: The length to see if it can fit in the current buffer
*
* Returns true if there's enough unused space in the seq_buf buffer
* to fit the amount of new data according to @len.
*/
/* True when @len more bytes still fit in the buffer. */
static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
{
    size_t needed = s->len + len;

    return needed <= s->size;
}
/**
* seq_buf_print_seq - move the contents of seq_buf into a seq_file
* @m: the seq_file descriptor that is the destination
* @s: the seq_buf descriptor that is the source.
*
* Returns zero on success, non zero otherwise
*/
int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
{
    /* Copy only the bytes actually used into the seq_file. */
    return seq_write(m, s->buffer, seq_buf_used(s));
}
/**
* seq_buf_vprintf - sequence printing of information.
* @s: seq_buf descriptor
* @fmt: printf format string
* @args: va_list of arguments from a printf() type function
*
* Writes a vsnprintf() format into the sequence buffer.
*
* Returns zero on success, -1 on overflow.
*/
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
{
    int len;

    WARN_ON(s->size == 0);

    if (s->len < s->size) {
        len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
        /* vsnprintf() returns the length that *would* have been
         * written; only commit it when the whole string fit. */
        if (s->len + len < s->size) {
            s->len += len;
            return 0;
        }
    }
    seq_buf_set_overflow(s);
    return -1;
}
/**
* seq_buf_printf - sequence printing of information
* @s: seq_buf descriptor
* @fmt: printf format string
*
* Writes a printf() format into the sequence buffer.
*
* Returns zero on success, -1 on overflow.
*/
int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
{
    int ret;
    va_list args;

    /* Thin varargs wrapper around seq_buf_vprintf(). */
    va_start(args, fmt);
    ret = seq_buf_vprintf(s, fmt, args);
    va_end(args);

    return ret;
}
/**
* seq_buf_bitmask - write a bitmask array in its ASCII representation
* @s: seq_buf descriptor
* @maskp: points to an array of unsigned longs that represent a bitmask
* @nmaskbits: The number of bits that are valid in @maskp
*
* Writes an ASCII representation of a bitmask string into @s.
*
* Returns zero on success, -1 on overflow.
*/
int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
            int nmaskbits)
{
    unsigned int len = seq_buf_buffer_left(s);
    int ret;

    WARN_ON(s->size == 0);

    /*
     * Note, because bitmap_scnprintf() only returns the number of bytes
     * written and not the number that would be written, we use the last
     * byte of the buffer to let us know if we overflowed. There's a small
     * chance that the bitmap could have fit exactly inside the buffer, but
     * it's not that critical if that does happen.
     */
    if (len > 1) {
        ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
        /* ret == len means the sentinel byte was consumed: overflow. */
        if (ret < len) {
            s->len += ret;
            return 0;
        }
    }
    seq_buf_set_overflow(s);
    return -1;
}
#ifdef CONFIG_BINARY_PRINTF
/**
* seq_buf_bprintf - Write the printf string from binary arguments
* @s: seq_buf descriptor
* @fmt: The format string for the @binary arguments
* @binary: The binary arguments for @fmt.
*
* When recording in a fast path, a printf may be recorded with just
* saving the format and the arguments as they were passed to the
* function, instead of wasting cycles converting the arguments into
* ASCII characters. Instead, the arguments are saved in a 32 bit
* word array that is defined by the format string constraints.
*
* This function will take the format and the binary array and finish
* the conversion into the ASCII string within the buffer.
*
* Returns zero on success, -1 on overflow.
*/
int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
{
	/* Space still free in the buffer. */
	unsigned int len = seq_buf_buffer_left(s);
	int ret;

	WARN_ON(s->size == 0);

	if (s->len < s->size) {
		ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
		/* Only commit if the whole formatted result fit; a result
		 * that just filled the buffer may have been truncated. */
		if (s->len + ret < s->size) {
			s->len += ret;
			return 0;
		}
	}
	seq_buf_set_overflow(s);
	return -1;
}
#endif /* CONFIG_BINARY_PRINTF */
/**
* seq_buf_puts - sequence printing of simple string
* @s: seq_buf descriptor
* @str: simple string to record
*
* Copy a simple string into the sequence buffer.
*
* Returns zero on success, -1 on overflow
*/
int seq_buf_puts(struct seq_buf *s, const char *str)
{
	unsigned int str_len = strlen(str);

	WARN_ON(s->size == 0);

	/* All-or-nothing: refuse the string unless it fits entirely. */
	if (!seq_buf_can_fit(s, str_len)) {
		seq_buf_set_overflow(s);
		return -1;
	}

	memcpy(s->buffer + s->len, str, str_len);
	s->len += str_len;
	return 0;
}
/**
* seq_buf_putc - sequence printing of simple character
* @s: seq_buf descriptor
* @c: simple character to record
*
* Copy a single character into the sequence buffer.
*
* Returns zero on success, -1 on overflow
*/
int seq_buf_putc(struct seq_buf *s, unsigned char c)
{
	WARN_ON(s->size == 0);

	/* A single byte either fits, or the buffer is marked overflowed. */
	if (!seq_buf_can_fit(s, 1)) {
		seq_buf_set_overflow(s);
		return -1;
	}

	s->buffer[s->len] = c;
	s->len++;
	return 0;
}
/**
 * seq_buf_putmem - write raw data into the sequence buffer
* @s: seq_buf descriptor
* @mem: The raw memory to copy into the buffer
* @len: The length of the raw memory to copy (in bytes)
*
* There may be cases where raw memory needs to be written into the
* buffer and a strcpy() would not work. Using this function allows
* for such cases.
*
* Returns zero on success, -1 on overflow
*/
int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
{
	WARN_ON(s->size == 0);

	/* Never copy a truncated chunk: either all of @mem goes in,
	 * or the buffer is flagged as overflowed. */
	if (!seq_buf_can_fit(s, len)) {
		seq_buf_set_overflow(s);
		return -1;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;
	return 0;
}
/* Maximum number of source bytes converted per seq_buf_putmem_hex() chunk,
 * and the scratch space needed for their hex digits plus a trailing space. */
#define MAX_MEMHEX_BYTES 8U
#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
/**
* seq_buf_putmem_hex - write raw memory into the buffer in ASCII hex
* @s: seq_buf descriptor
* @mem: The raw memory to write its hex ASCII representation of
* @len: The length of the raw memory to copy (in bytes)
*
* This is similar to seq_buf_putmem() except instead of just copying the
* raw memory into the buffer it writes its ASCII representation of it
* in hex characters.
*
* Returns zero on success, -1 on overflow
*/
int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len)
{
unsigned char hex[HEX_CHARS];
const unsigned char *data = mem;
unsigned int start_len;
int i, j;
WARN_ON(s->size == 0);
while (len) {
start_len = min(len, HEX_CHARS - 1);
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < start_len; i++) {
#else
for (i = start_len-1, j = 0; i >= 0; i--) {
#endif
hex[j++] = hex_asc_hi(data[i]);
hex[j++] = hex_asc_lo(data[i]);
}
if (WARN_ON_ONCE(j == 0 || j/2 > len))
break;
/* j increments twice per loop */
len -= j / 2;
hex[j++] = ' ';
seq_buf_putmem(s, hex, j);
if (seq_buf_has_overflowed(s))
return -1;
}
return 0;
}
/**
* seq_buf_path - copy a path into the sequence buffer
* @s: seq_buf descriptor
* @path: path to write into the sequence buffer.
* @esc: set of characters to escape in the output
*
* Write a path name into the sequence buffer.
*
* Returns the number of written bytes on success, -1 on overflow
*/
int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
{
	char *buf;
	/* Borrow the unused tail of the buffer to render into directly. */
	size_t size = seq_buf_get_buf(s, &buf);
	int res = -1;

	WARN_ON(s->size == 0);

	if (size) {
		char *p = d_path(path, buf, size);
		if (!IS_ERR(p)) {
			/* mangle_path() moves the name to the start of buf
			 * while escaping the characters in @esc; it returns
			 * the new end pointer, or NULL on failure. */
			char *end = mangle_path(buf, p, esc);
			if (end)
				res = end - buf;
		}
	}
	/* Commit @res bytes; a negative res marks the buffer overflowed. */
	seq_buf_commit(s, res);

	return res;
}
/**
 * seq_buf_to_user - copy the sequence buffer to user space
* @s: seq_buf descriptor
* @ubuf: The userspace memory location to copy to
* @cnt: The amount to copy
*
* Copies the sequence buffer into the userspace memory pointed to
* by @ubuf. It starts from the last read position (@s->readpos)
* and writes up to @cnt characters or till it reaches the end of
 * the content in the buffer (@s->len), whichever comes first.
*
* On success, it returns a positive number of the number of bytes
* it copied.
*
* On failure it returns -EBUSY if all of the content in the
* sequence has been already read, which includes nothing in the
* sequence (@s->len == @s->readpos).
*
* Returns -EFAULT if the copy to userspace fails.
*/
int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
{
	int avail;
	int not_copied;

	if (!cnt)
		return 0;

	/* Everything up to s->len has already been consumed. */
	if (s->len <= s->readpos)
		return -EBUSY;

	/* Clamp the request to the unread portion of the buffer. */
	avail = seq_buf_used(s) - s->readpos;
	if (cnt > avail)
		cnt = avail;

	/* copy_to_user() returns the number of bytes NOT copied;
	 * a completely failed copy is reported as -EFAULT, a partial
	 * one simply shrinks the byte count we account for. */
	not_copied = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (not_copied == cnt)
		return -EFAULT;

	cnt -= not_copied;
	s->readpos += cnt;

	return cnt;
}
| gpl-2.0 |
Shauren/TrinityCore | src/server/scripts/Kalimdor/CavernsOfTime/TheBlackMorass/boss_aeonus.cpp | 13 | 3858 | /*
* This file is part of the TrinityCore Project. See AUTHORS file for Copyright information
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
Name: Boss_Aeonus
%Complete: 80
Comment: Some spells not implemented
Category: Caverns of Time, The Dark Portal
*/
#include "ScriptMgr.h"
#include "InstanceScript.h"
#include "ScriptedCreature.h"
#include "the_black_morass.h"
// Creature-text group ids and spell ids used by the Aeonus encounter.
enum Enums
{
    SAY_ENTER = 0,
    SAY_AGGRO = 1,
    SAY_BANISH = 2,
    SAY_SLAY = 3,
    SAY_DEATH = 4,
    EMOTE_FRENZY = 5,

    SPELL_CLEAVE = 40504,
    SPELL_TIME_STOP = 31422,
    SPELL_ENRAGE = 37605,
    SPELL_SAND_BREATH = 31473,
    H_SPELL_SAND_BREATH = 39049    // heroic-mode variant (currently unused here)
};
// Combat event ids scheduled on the boss' EventMap.
enum Events
{
    EVENT_SANDBREATH = 1,
    EVENT_TIMESTOP = 2,
    EVENT_FRENZY = 3
};
// AI for Aeonus, final boss of The Black Morass (Caverns of Time).
struct boss_aeonus : public BossAI
{
    boss_aeonus(Creature* creature) : BossAI(creature, TYPE_AEONUS) { }

    void Reset() override { }

    void JustEngagedWith(Unit* /*who*/) override
    {
        // Kick off the three rotating combat abilities with randomized timers.
        events.ScheduleEvent(EVENT_SANDBREATH, 15s, 30s);
        events.ScheduleEvent(EVENT_TIMESTOP, 10s, 15s);
        events.ScheduleEvent(EVENT_FRENZY, 30s, 45s);
        Talk(SAY_AGGRO);
    }

    void MoveInLineOfSight(Unit* who) override
    {
        //Despawn Time Keeper
        if (who->GetTypeId() == TYPEID_UNIT && who->GetEntry() == NPC_TIME_KEEPER)
        {
            if (me->IsWithinDistInMap(who, 20.0f))
            {
                Talk(SAY_BANISH);
                // Kill the Time Keeper outright by dealing its full health as damage.
                Unit::DealDamage(me, who, who->GetHealth(), nullptr, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, nullptr, false);
            }
        }

        ScriptedAI::MoveInLineOfSight(who);
    }

    void JustDied(Unit* /*killer*/) override
    {
        Talk(SAY_DEATH);
        // Closing the encounter also completes the rift sequence.
        instance->SetData(TYPE_RIFT, DONE);
        instance->SetData(TYPE_MEDIVH, DONE); // FIXME: later should be removed
    }

    void KilledUnit(Unit* who) override
    {
        // Only quote the slay line when a player dies.
        if (who->GetTypeId() == TYPEID_PLAYER)
            Talk(SAY_SLAY);
    }

    void UpdateAI(uint32 diff) override
    {
        //Return since we have no target
        if (!UpdateVictim())
            return;

        events.Update(diff);

        // Never interrupt an ability that is already being cast.
        if (me->HasUnitState(UNIT_STATE_CASTING))
            return;

        while (uint32 eventId = events.ExecuteEvent())
        {
            switch (eventId)
            {
                case EVENT_SANDBREATH:
                    DoCastVictim(SPELL_SAND_BREATH);
                    events.ScheduleEvent(EVENT_SANDBREATH, 15s, 25s);
                    break;
                case EVENT_TIMESTOP:
                    DoCastVictim(SPELL_TIME_STOP);
                    events.ScheduleEvent(EVENT_TIMESTOP, 20s, 35s);
                    break;
                case EVENT_FRENZY:
                    Talk(EMOTE_FRENZY);
                    DoCast(me, SPELL_ENRAGE);
                    events.ScheduleEvent(EVENT_FRENZY, 20s, 35s);
                    break;
                default:
                    break;
            }

            // An event may have started a cast; stop processing if so.
            if (me->HasUnitState(UNIT_STATE_CASTING))
                return;
        }

        DoMeleeAttackIfReady();
    }
};
// Registers the Aeonus AI with the script system (called from the script loader).
void AddSC_boss_aeonus()
{
    RegisterBlackMorassCreatureAI(boss_aeonus);
}
| gpl-2.0 |
freak97/binutils | bfd/mach-o-i386.c | 13 | 13446 | /* Intel i386 Mach-O support for BFD.
Copyright (C) 2009-2016 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
MA 02110-1301, USA. */
#include "sysdep.h"
#include "mach-o.h"
#include "bfd.h"
#include "libbfd.h"
#include "libiberty.h"
#include "mach-o/reloc.h"
/* Route the generic Mach-O target hooks to the i386-specific versions
   defined below; these names are picked up by mach-o-target.c.  */
#define bfd_mach_o_object_p bfd_mach_o_i386_object_p
#define bfd_mach_o_core_p bfd_mach_o_i386_core_p
#define bfd_mach_o_mkobject bfd_mach_o_i386_mkobject
/* Recognize a Mach-O file whose cputype is i386 (any file type).  */
static const bfd_target *
bfd_mach_o_i386_object_p (bfd *abfd)
{
  return bfd_mach_o_header_p (abfd, 0, 0, BFD_MACH_O_CPU_TYPE_I386);
}
/* Recognize an i386 Mach-O core file (filetype MH_CORE only).  */
static const bfd_target *
bfd_mach_o_i386_core_p (bfd *abfd)
{
  return bfd_mach_o_header_p (abfd, 0,
			      BFD_MACH_O_MH_CORE, BFD_MACH_O_CPU_TYPE_I386);
}
/* Prepare ABFD for writing an i386 Mach-O object: initialize the
   generic Mach-O tdata, then fill in the 32-bit little-endian i386
   header fields.  */
static bfd_boolean
bfd_mach_o_i386_mkobject (bfd *abfd)
{
  bfd_mach_o_data_struct *mdata;

  if (!bfd_mach_o_mkobject_init (abfd))
    return FALSE;

  mdata = bfd_mach_o_get_data (abfd);
  mdata->header.magic = BFD_MACH_O_MH_MAGIC;
  mdata->header.cputype = BFD_MACH_O_CPU_TYPE_I386;
  mdata->header.cpusubtype = BFD_MACH_O_CPU_SUBTYPE_X86_ALL;
  mdata->header.byteorder = BFD_ENDIAN_LITTLE;
  mdata->header.version = 1;

  return TRUE;
}
/* Relocation howtos, referenced by index from
   bfd_mach_o_i386_canonicalize_one_reloc below; keep the order stable.  */
static reloc_howto_type i386_howto_table[]=
{
  /* 0 */
  HOWTO(BFD_RELOC_32, 0, 2, 32, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "32",
	FALSE, 0xffffffff, 0xffffffff, FALSE),
  /* 1 */
  HOWTO(BFD_RELOC_16, 0, 1, 16, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "16",
	FALSE, 0xffff, 0xffff, FALSE),
  /* 2 */
  HOWTO(BFD_RELOC_8, 0, 0, 8, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "8",
	FALSE, 0xff, 0xff, FALSE),
  /* 3 */
  HOWTO(BFD_RELOC_32_PCREL, 0, 2, 32, TRUE, 0,
	complain_overflow_bitfield,
	NULL, "DISP32",
	FALSE, 0xffffffff, 0xffffffff, TRUE),
  /* 4 */
  HOWTO(BFD_RELOC_16_PCREL, 0, 1, 16, TRUE, 0,
	complain_overflow_bitfield,
	NULL, "DISP16",
	FALSE, 0xffff, 0xffff, TRUE),
  /* 5 */
  HOWTO(BFD_RELOC_MACH_O_SECTDIFF, 0, 2, 32, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "SECTDIFF_32",
	FALSE, 0xffffffff, 0xffffffff, FALSE),
  /* 6 */
  HOWTO(BFD_RELOC_MACH_O_LOCAL_SECTDIFF, 0, 2, 32, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "LSECTDIFF_32",
	FALSE, 0xffffffff, 0xffffffff, FALSE),
  /* 7 */
  HOWTO(BFD_RELOC_MACH_O_PAIR, 0, 2, 32, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "PAIR_32",
	FALSE, 0xffffffff, 0xffffffff, FALSE),
  /* 8 */
  HOWTO(BFD_RELOC_MACH_O_SECTDIFF, 0, 1, 16, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "SECTDIFF_16",
	FALSE, 0xffff, 0xffff, FALSE),
  /* 9 */
  HOWTO(BFD_RELOC_MACH_O_LOCAL_SECTDIFF, 0, 1, 16, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "LSECTDIFF_16",
	FALSE, 0xffff, 0xffff, FALSE),
  /* 10 */
  HOWTO(BFD_RELOC_MACH_O_PAIR, 0, 1, 16, FALSE, 0,
	complain_overflow_bitfield,
	NULL, "PAIR_16",
	FALSE, 0xffff, 0xffff, FALSE),
};
/* Convert the raw Mach-O relocation RAW into a BFD arelent in RES,
   selecting the matching i386_howto_table entry.  Scattered PAIR
   relocations inherit the address of the preceding entry (RES[-1]).
   Returns FALSE for relocation type/length combinations that are not
   supported.  */
static bfd_boolean
bfd_mach_o_i386_canonicalize_one_reloc (bfd *abfd,
					struct mach_o_reloc_info_external *raw,
					arelent *res, asymbol **syms)
{
  bfd_mach_o_reloc_info reloc;

  /* Generic decoding of symbol, address and flag bits.  */
  if (!bfd_mach_o_pre_canonicalize_one_reloc (abfd, raw, &reloc, res, syms))
    return FALSE;

  if (reloc.r_scattered)
    {
      switch (reloc.r_type)
	{
	case BFD_MACH_O_GENERIC_RELOC_PAIR:
	  /* A PAIR immediately follows its SECTDIFF/LOCAL_SECTDIFF and
	     shares that relocation's address.  */
	  if (reloc.r_length == 2)
	    {
	      res->howto = &i386_howto_table[7];
	      res->address = res[-1].address;
	      return TRUE;
	    }
	  else if (reloc.r_length == 1)
	    {
	      res->howto = &i386_howto_table[10];
	      res->address = res[-1].address;
	      return TRUE;
	    }
	  return FALSE;
	case BFD_MACH_O_GENERIC_RELOC_SECTDIFF:
	  if (reloc.r_length == 2)
	    {
	      res->howto = &i386_howto_table[5];
	      return TRUE;
	    }
	  else if (reloc.r_length == 1)
	    {
	      res->howto = &i386_howto_table[8];
	      return TRUE;
	    }
	  return FALSE;
	case BFD_MACH_O_GENERIC_RELOC_LOCAL_SECTDIFF:
	  if (reloc.r_length == 2)
	    {
	      res->howto = &i386_howto_table[6];
	      return TRUE;
	    }
	  else if (reloc.r_length == 1)
	    {
	      res->howto = &i386_howto_table[9];
	      return TRUE;
	    }
	  return FALSE;
	default:
	  return FALSE;
	}
    }
  else
    {
      switch (reloc.r_type)
	{
	case BFD_MACH_O_GENERIC_RELOC_VANILLA:
	  /* Dispatch on (r_length, r_pcrel); r_length 0/1/2 encodes an
	     8/16/32-bit field.  */
	  switch ((reloc.r_length << 1) | reloc.r_pcrel)
	    {
	    case 0: /* len = 0, pcrel = 0 */
	      res->howto = &i386_howto_table[2];
	      return TRUE;
	    case 2: /* len = 1, pcrel = 0 */
	      res->howto = &i386_howto_table[1];
	      return TRUE;
	    case 3: /* len = 1, pcrel = 1 */
	      res->howto = &i386_howto_table[4];
	      return TRUE;
	    case 4: /* len = 2, pcrel = 0 */
	      res->howto = &i386_howto_table[0];
	      return TRUE;
	    case 5: /* len = 2, pcrel = 1 */
	      res->howto = &i386_howto_table[3];
	      return TRUE;
	    default:
	      return FALSE;
	    }
	  break;
	default:
	  return FALSE;
	}
    }
}
/* The inverse of canonicalization: encode the BFD relocation REL into
   the Mach-O representation RINFO.  Returns FALSE for howto types we
   cannot emit.  */
static bfd_boolean
bfd_mach_o_i386_swap_reloc_out (arelent *rel, bfd_mach_o_reloc_info *rinfo)
{
  rinfo->r_address = rel->address;
  switch (rel->howto->type)
    {
    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
    case BFD_RELOC_8:
      rinfo->r_scattered = 0;
      rinfo->r_type = BFD_MACH_O_GENERIC_RELOC_VANILLA;
      rinfo->r_pcrel = rel->howto->pc_relative;
      rinfo->r_length = rel->howto->size;	/* Correct in practice. */
      if ((*rel->sym_ptr_ptr)->flags & BSF_SECTION_SYM)
	{
	  /* Section symbol: reference the output section by index.  */
	  rinfo->r_extern = 0;
	  rinfo->r_value =
	    (*rel->sym_ptr_ptr)->section->output_section->target_index;
	}
      else
	{
	  /* External symbol: reference it by its symbol table index,
	     stashed earlier in udata.i.  */
	  rinfo->r_extern = 1;
	  rinfo->r_value = (*rel->sym_ptr_ptr)->udata.i;
	}
      break;
    case BFD_RELOC_MACH_O_SECTDIFF:
      rinfo->r_scattered = 1;
      rinfo->r_type = BFD_MACH_O_GENERIC_RELOC_SECTDIFF;
      rinfo->r_pcrel = 0;
      rinfo->r_length = rel->howto->size;
      rinfo->r_extern = 0;
      rinfo->r_value = rel->addend;
      break;
    case BFD_RELOC_MACH_O_LOCAL_SECTDIFF:
      rinfo->r_scattered = 1;
      rinfo->r_type = BFD_MACH_O_GENERIC_RELOC_LOCAL_SECTDIFF;
      rinfo->r_pcrel = 0;
      rinfo->r_length = rel->howto->size;
      rinfo->r_extern = 0;
      rinfo->r_value = rel->addend;
      break;
    case BFD_RELOC_MACH_O_PAIR:
      /* The PAIR carries no address of its own.  */
      rinfo->r_address = 0;
      rinfo->r_scattered = 1;
      rinfo->r_type = BFD_MACH_O_GENERIC_RELOC_PAIR;
      rinfo->r_pcrel = 0;
      rinfo->r_length = rel->howto->size;
      rinfo->r_extern = 0;
      rinfo->r_value = rel->addend;
      break;
    default:
      return FALSE;
    }
  return TRUE;
}
/* Map a BFD relocation code to the corresponding howto entry, or NULL
   when the code is not supported by this target.  */
static reloc_howto_type *
bfd_mach_o_i386_bfd_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
				       bfd_reloc_code_real_type code)
{
  unsigned int count = sizeof (i386_howto_table) / sizeof (*i386_howto_table);
  unsigned int k;

  /* The table is tiny, so a linear scan is fine.  */
  for (k = 0; k < count; k++)
    {
      if (i386_howto_table[k].type == code)
	return &i386_howto_table[k];
    }
  return NULL;
}
/* Lookup by relocation name is not implemented for this target;
   always report "not found".  */
static reloc_howto_type *
bfd_mach_o_i386_bfd_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
				       const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}
/* Pretty-print an x86 LC_THREAD/LC_UNIXTHREAD state block (BUF, of
   THREAD->size bytes) to VFILE.  Returns FALSE when the flavour is
   unknown or the block is too small to contain the expected fields.  */
static bfd_boolean
bfd_mach_o_i386_print_thread (bfd *abfd, bfd_mach_o_thread_flavour *thread,
			      void *vfile, char *buf)
{
  FILE *file = (FILE *)vfile;

  switch (thread->flavour)
    {
    case BFD_MACH_O_x86_THREAD_STATE:
      /* Header (flavor+count, 8 bytes) plus 16 32-bit registers.  */
      if (thread->size < (8 + 16 * 4))
	return FALSE;
      fprintf (file, " x86_THREAD_STATE:\n");
      fprintf (file, " flavor: 0x%08lx count: 0x%08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 0),
	       (unsigned long)bfd_get_32 (abfd, buf + 4));
      fprintf (file, " eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 8),
	       (unsigned long)bfd_get_32 (abfd, buf + 12),
	       (unsigned long)bfd_get_32 (abfd, buf + 16),
	       (unsigned long)bfd_get_32 (abfd, buf + 20));
      fprintf (file, " edi: %08lx esi: %08lx ebp: %08lx esp: %08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 24),
	       (unsigned long)bfd_get_32 (abfd, buf + 28),
	       (unsigned long)bfd_get_32 (abfd, buf + 32),
	       (unsigned long)bfd_get_32 (abfd, buf + 36));
      fprintf (file, " ss: %08lx flg: %08lx eip: %08lx cs: %08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 40),
	       (unsigned long)bfd_get_32 (abfd, buf + 44),
	       (unsigned long)bfd_get_32 (abfd, buf + 48),
	       (unsigned long)bfd_get_32 (abfd, buf + 52));
      fprintf (file, " ds: %08lx es: %08lx fs: %08lx gs: %08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 56),
	       (unsigned long)bfd_get_32 (abfd, buf + 60),
	       (unsigned long)bfd_get_32 (abfd, buf + 64),
	       (unsigned long)bfd_get_32 (abfd, buf + 68));
      return TRUE;
    case BFD_MACH_O_x86_FLOAT_STATE:
      /* Only the flavor/count header is decoded for FP state.  */
      if (thread->size < 8)
	return FALSE;
      fprintf (file, " x86_FLOAT_STATE:\n");
      fprintf (file, " flavor: 0x%08lx count: 0x%08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 0),
	       (unsigned long)bfd_get_32 (abfd, buf + 4));
      return TRUE;
    case BFD_MACH_O_x86_EXCEPTION_STATE:
      /* Header plus trapno/err/faultaddr.  */
      if (thread->size < 8 + 3 * 4)
	return FALSE;
      fprintf (file, " x86_EXCEPTION_STATE:\n");
      fprintf (file, " flavor: 0x%08lx count: 0x%08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 0),
	       (unsigned long)bfd_get_32 (abfd, buf + 4));
      fprintf (file, " trapno: %08lx err: %08lx faultaddr: %08lx\n",
	       (unsigned long)bfd_get_32 (abfd, buf + 8),
	       (unsigned long)bfd_get_32 (abfd, buf + 12),
	       (unsigned long)bfd_get_32 (abfd, buf + 16));
      return TRUE;
    default:
      break;
    }
  return FALSE;
}
/* Translation tables between canonical BFD section names and Mach-O
   (segment, section) pairs, with the flags/type/attributes/alignment
   each section gets.  NULL entries terminate each table.  */
static const mach_o_section_name_xlat text_section_names_xlat[] =
  {
    { ".symbol_stub", "__symbol_stub",
      SEC_CODE | SEC_LOAD, BFD_MACH_O_S_SYMBOL_STUBS,
      BFD_MACH_O_S_ATTR_PURE_INSTRUCTIONS,
      0},
    { ".picsymbol_stub", "__picsymbol_stub",
      SEC_CODE | SEC_LOAD, BFD_MACH_O_S_SYMBOL_STUBS,
      BFD_MACH_O_S_ATTR_PURE_INSTRUCTIONS,
      0},
    { NULL, NULL, 0, 0, 0, 0}
  };

static const mach_o_section_name_xlat data_section_names_xlat[] =
  {
    /* The first two are recognized by i386, but not emitted for x86 by
       modern GCC. */
    { ".non_lazy_symbol_pointer", "__nl_symbol_ptr",
      SEC_DATA | SEC_LOAD, BFD_MACH_O_S_NON_LAZY_SYMBOL_POINTERS,
      BFD_MACH_O_S_ATTR_NONE, 2},
    { ".lazy_symbol_pointer", "__la_symbol_ptr",
      SEC_DATA | SEC_LOAD, BFD_MACH_O_S_LAZY_SYMBOL_POINTERS,
      BFD_MACH_O_S_ATTR_NONE, 2},
    { ".lazy_symbol_pointer2", "__la_sym_ptr2",
      SEC_DATA | SEC_LOAD, BFD_MACH_O_S_LAZY_SYMBOL_POINTERS,
      BFD_MACH_O_S_ATTR_NONE, 2},
    { ".lazy_symbol_pointer3", "__la_sym_ptr3",
      SEC_DATA | SEC_LOAD, BFD_MACH_O_S_LAZY_SYMBOL_POINTERS,
      BFD_MACH_O_S_ATTR_NONE, 2},
    { NULL, NULL, 0, 0, 0, 0}
  };

static const mach_o_section_name_xlat import_section_names_xlat[] =
  {
    { ".picsymbol_stub3", "__jump_table",
      SEC_CODE | SEC_LOAD, BFD_MACH_O_S_SYMBOL_STUBS,
      BFD_MACH_O_S_ATTR_PURE_INSTRUCTIONS
      | BFD_MACH_O_S_SELF_MODIFYING_CODE,
      6},
    { ".non_lazy_symbol_pointer_x86", "__pointers",
      SEC_DATA | SEC_LOAD, BFD_MACH_O_S_NON_LAZY_SYMBOL_POINTERS,
      BFD_MACH_O_S_ATTR_NONE, 2},
    { NULL, NULL, 0, 0, 0, 0}
  };

/* Top-level map from segment name to the per-segment section table.  */
const mach_o_segment_name_xlat mach_o_i386_segsec_names_xlat[] =
  {
    { "__TEXT", text_section_names_xlat },
    { "__DATA", data_section_names_xlat },
    { "__IMPORT", import_section_names_xlat },
    { NULL, NULL }
  };
/* Bind the generic mach-o-target.c template to the i386 implementations
   above, then instantiate the "mach-o-i386" target vector.  */
#define bfd_mach_o_canonicalize_one_reloc bfd_mach_o_i386_canonicalize_one_reloc
#define bfd_mach_o_swap_reloc_out bfd_mach_o_i386_swap_reloc_out
#define bfd_mach_o_print_thread bfd_mach_o_i386_print_thread
#define bfd_mach_o_tgt_seg_table mach_o_i386_segsec_names_xlat
#define bfd_mach_o_section_type_valid_for_tgt NULL
#define bfd_mach_o_bfd_reloc_type_lookup bfd_mach_o_i386_bfd_reloc_type_lookup
#define bfd_mach_o_bfd_reloc_name_lookup bfd_mach_o_i386_bfd_reloc_name_lookup

#define TARGET_NAME i386_mach_o_vec
#define TARGET_STRING "mach-o-i386"
#define TARGET_ARCHITECTURE bfd_arch_i386
#define TARGET_PAGESIZE 4096
#define TARGET_BIG_ENDIAN 0
#define TARGET_ARCHIVE 0
#define TARGET_PRIORITY 0
#include "mach-o-target.c"
| gpl-2.0 |
xiaoleili/linux-mediatek | drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 525 | 119487 | /*************************************************************************
* myri10ge.c: Myricom Myri-10G Ethernet driver.
*
* Copyright (C) 2005 - 2011 Myricom, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Myricom, Inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* If the eeprom on your board is not recent enough, you will need to get a
* newer firmware image at:
* http://www.myri.com/scs/download-Myri10GE.html
*
* Contact Information:
* <help@myri.com>
* Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
*************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
#define MYRI10GE_VERSION_STR "1.5.3-1.534"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

/* Values of mgp->running (driver run state). */
#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

/* Rx buffers are carved out of page allocations of this order/size. */
#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)

#define MYRI10GE_MAX_SLICES 32
/* Per-slot receive bookkeeping: the backing page and the DMA mapping
 * needed to unmap it on completion. */
struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* Per-slot transmit bookkeeping: the skb (freed when the NIC completes
 * it; @last marks the slot that finishes a packet) and its DMA mapping. */
struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* In/out argument words for a firmware command (see myri10ge_send_cmd()). */
struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

/* One receive ring (used for both the "small" and "big" buffer rings). */
struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int alloc_fail;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};

/* One transmit ring with its doorbells and submit/complete accounting;
 * the submit-side and complete-side counters live on separate cache
 * lines (____cacheline_aligned) to avoid false sharing. */
struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1 */
	int req ____cacheline_aligned;	/* transmit slots submitted */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};

/* Receive completion ring written by the NIC. */
struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
};
/* Per-slice packet/byte/drop counters, summed for the netdev stats. */
struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

/* Per-queue ("slice") state: one tx ring, two rx rings (small/big),
 * a completion ring, a NAPI context and watchdog snapshots. */
struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	/* Snapshots taken by the watchdog to detect a stuck slice. */
	int watchdog_tx_done;
	int watchdog_tx_req;
	int watchdog_rx_done;
	int stuck;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Busy-poll arbitration between NAPI and socket pollers. */
	unsigned int state;
#define SLICE_STATE_IDLE 0
#define SLICE_STATE_NAPI 1	/* NAPI owns this slice */
#define SLICE_STATE_POLL 2	/* poll owns this slice */
#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
#define SLICE_STATE_NAPI_YIELD 4	/* NAPI yielded this slice */
#define SLICE_STATE_POLL_YIELD 8	/* poll yielded this slice */
#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
	spinlock_t lock;
	unsigned long lock_napi_yield;
	unsigned long lock_poll_yield;
	unsigned long busy_poll_miss;
	unsigned long busy_poll_cnt;
#endif /* CONFIG_NET_RX_BUSY_POLL */
	char irq_desc[32];
};
/* Per-adapter driver state (netdev private data). */
struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running? (MYRI10GE_ETH_* state) */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	u8 __iomem *sram;	/* mapped NIC SRAM (command window, rings) */
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;	/* DMA-visible command response block */
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int wc_cookie;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int watchdog_pause;
	int pause;
	/* Firmware image selection; fw_name may be kmalloc'ed, in which
	 * case fw_name_allocated is set (see set_fw_name()). */
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[ETH_ALEN];	/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	u32 features;
	u32 max_tso6;
	/* DMA benchmark results (MB/s) read from the firmware. */
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
	int rebooted;
};
/* Default firmware images: standard vs. "unaligned" (for chipsets that
 * cannot do aligned PCIe completions), each with an RSS variant. */
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";

MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");

/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
#define MYRI10GE_MAX_BOARDS 8
/* Optional per-board firmware override; unset entries stay NULL. */
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
/* Fixed: this description belongs to the myri10ge_fw_names array; it was
 * previously attached to myri10ge_fw_name, duplicating (and clobbering the
 * intent of) that parameter's description in modinfo output. */
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
/* Tunable module parameters; descriptions appear in modinfo. */
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

/* Not exposed as a parameter; always attempt recovery after a NIC reset. */
static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");

#define MYRI10GE_FW_OFFSET 1024*1024
/* Split a dma_addr_t into the 32-bit halves the firmware expects. */
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
/* Forward declarations for routines defined later in this file. */
static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

/* Write a 32-bit big-endian value to device (MMIO) memory. */
static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
						    struct rtnl_link_stats64 *stats);
/* Install a new firmware image name, freeing the previous one if it was
 * dynamically allocated.  @allocated records whether @name itself was
 * kmalloc'ed and must be freed when replaced. */
static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}
/*
 * Issue a command to the NIC firmware through the command window in
 * SRAM and poll for the response, which the firmware DMAs back into
 * mgp->cmd.
 *
 * @cmd:    MXGEFW_CMD_* opcode
 * @data:   in: data0-data2 arguments; out: data0 receives the response
 *          value on success
 * @atomic: if non-zero, busy-wait (up to ~1ms) instead of sleeping so
 *          the call is safe from atomic context
 *
 * Returns 0 on success, or a negative errno: -ENOSYS (unknown command),
 * -E2BIG (unaligned), -ERANGE (multi-TX-queue probe rejected), -ENXIO
 * (other firmware error), -EAGAIN (timeout).
 */
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	/* Prime the sentinel so we can detect the firmware's answer. */
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most command */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->
			    data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
			   0) {
			/* Expected failure when probing for multi-TX-queue
			 * support on firmware that lacks it. */
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}
/*
* The eeprom strings on the lanaiX have the format
* SN=x\0
* MAC=x:x:x:x:x:x\0
* PT:ddd mmm xx xx:xx:xx xx\0
* PV:ddd mmm xx xx:xx:xx xx\0
*/
/*
 * Parse the NUL-separated EEPROM strings (format documented above)
 * for the MAC address, product code and serial number.
 *
 * Fixes over the previous version: the loop condition checked
 * *ptr before checking ptr < limit (reading one byte past the
 * buffer when it is completely full), and the memcmp() tag probes
 * could read up to 3 bytes past @limit on an unterminated string
 * block.  Both are now bounds-checked first.
 *
 * Returns 0 on success, -ENXIO if the MAC address is truncated.
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (ptr < limit && *ptr != '\0') {
		if (limit - ptr >= 4 && memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				/* need at least "xx" plus separator/NUL */
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;	/* skip ':' (or NUL) */
			}
		}
		if (limit - ptr >= 3 && memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (limit - ptr >= 3 && memcmp(ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		/* advance past the current string's terminating NUL */
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}
/*
* Enable or disable periodic RDMAs from the host to make certain
* chipsets resend dropped PCIe messages
*/
static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	/* poll the confirm word for up to ~20ms */
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	/* failure is logged but not fatal: the NIC may still work */
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}
/*
 * Sanity-check a firmware header: it must be an Ethernet MCP image
 * whose major.minor version matches what this driver was built for.
 * Side effect: caches the version string and parsed version numbers
 * in @mgp for ethtool and the adoption workaround checks.
 */
static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* only Ethernet MCP images are usable here */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* stash a NUL-terminated copy of the version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
	mgp->fw_version[sizeof(mgp->fw_version) - 1] = '\0';

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (mgp->fw_ver_major != MXGEFW_VERSION_MAJOR ||
	    mgp->fw_ver_minor != MXGEFW_VERSION_MINOR) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}
/*
 * Fetch the firmware image via the kernel firmware loader, validate
 * it, PIO-copy it into NIC SRAM at MYRI10GE_FW_OFFSET, and verify the
 * copy by reading it back and comparing CRC32s.
 * On success *size is set to the image length and 0 is returned;
 * otherwise a negative errno (-EIO specifically means the readback
 * CRC mismatched, i.e. the copy into SRAM is corrupt).
 */
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size: must fit in SRAM above the offset, and be large
	 * enough to contain the header pointer */
	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id: header offset must be aligned and in-bounds */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	/* copy in 256-byte chunks; the readb() flushes posted writes */
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
		readb(mgp->sram);
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}
/*
 * Fall back to using whatever firmware is already running on the NIC
 * (e.g. loaded by an earlier driver instance): locate its header in
 * SRAM, copy the header to host memory and validate it.
 * Also flags fw 1.4.4..1.4.11, which filters broadcasts unless the
 * NIC is kept in ALLMULTI mode, so later code can work around that.
 */
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;
	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}
/*
 * Query the freshly loaded/adopted firmware for optional features:
 * IPv6 TSO support (sets NETIF_F_TSO6 and max_tso6 when available)
 * and the receive ring size, from which the number of interrupt
 * queue slots is derived.  Returns 0 on success, -ENXIO if the ring
 * size query fails.
 */
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int err;

	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;

	/* probe for IPv6 TSO support */
	err = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				&cmd, 0);
	if (!err) {
		mgp->features |= NETIF_F_TSO6;
		mgp->max_tso6 = cmd.data0;
	}

	err = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (err) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	/* two interrupt slots per rx descriptor */
	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
	return 0;
}
/*
 * Load firmware onto the NIC: try the hotplug image first; if that
 * fails and @adopt is set, fall back to adopting the firmware already
 * running on the card.  On the hotplug path, hand the copied image
 * off to the bootstrap MCP and wait for the confirmation word.
 * Finishes by enabling dummy RDMAs and probing firmware capabilities.
 */
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ".  For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		/* adopted firmware is assumed to need 2KB boundaries */
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	mb();
	/* exponential backoff: wait up to ~511ms total for the boot */
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}
/*
 * Tell the firmware the station MAC address.  The six bytes are
 * packed big-endian-style: bytes 0-3 into data0, bytes 4-5 into
 * data1.  Returns the send_cmd status.
 */
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;

	cmd.data0 = (addr[0] << 24) | (addr[1] << 16)
	    | (addr[2] << 8) | addr[3];
	cmd.data1 = (addr[4] << 8) | addr[5];

	return myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
}
/*
 * Enable or disable link-level flow control in the firmware and,
 * on success, record the new setting in mgp->pause.
 */
static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int err;
	int op = pause ? MXGEFW_ENABLE_FLOW_CONTROL
		       : MXGEFW_DISABLE_FLOW_CONTROL;

	err = myri10ge_send_cmd(mgp, op, &cmd, 0);
	if (err) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return err;
	}

	mgp->pause = pause;
	return 0;
}
/*
 * Enable or disable promiscuous mode in the firmware.  @atomic is
 * forwarded to myri10ge_send_cmd so this can be called from contexts
 * that must not sleep.  Failures are logged but not propagated.
 */
static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int op = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;

	if (myri10ge_send_cmd(mgp, op, &cmd, atomic) != 0)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
/*
 * Run the firmware's DMA benchmark against a scratch page and record
 * the measured read, write and read/write bandwidths in @mgp.
 * @test_type selects the firmware command (normal or unaligned test).
 * Returns 0 on success or the failing send_cmd status.
 */
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	int status;
	u32 len;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
		__free_page(dmatest_page);
		return -ENOMEM;
	}

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests.  The
	 * results are returned in cmd.data0.  The upper 16
	 * bits or the return is the number of transfers completed.
	 * The lower 16 bits is the time in 0.5us ticks that the
	 * transfers took to complete.
	 */

	len = mgp->tx_boundary;

	/* 0x10000 multiplier = DMA read test */
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	/* 0x1 multiplier = DMA write test */
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	/* 0x10001 multiplier = simultaneous read+write test */
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	/* the unaligned test is expected to fail on some chipsets */
	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);
	return status;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/*
 * Per-slice lock arbitrating between the NAPI poll handler and the
 * busy-poll (low-latency socket) path.  Only one of the two may own
 * the slice at a time; the loser records a "yield" so contention is
 * visible in statistics.
 */
static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
{
	spin_lock_init(&ss->lock);
	ss->state = SLICE_STATE_IDLE;
}

/* Try to take the slice for NAPI; returns false if busy-poll owns it. */
static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
{
	bool rc = true;
	spin_lock(&ss->lock);
	if ((ss->state & SLICE_LOCKED)) {
		WARN_ON((ss->state & SLICE_STATE_NAPI));
		/* flag that NAPI lost a race with busy-poll */
		ss->state |= SLICE_STATE_NAPI_YIELD;
		rc = false;
		ss->lock_napi_yield++;
	} else
		ss->state = SLICE_STATE_NAPI;
	spin_unlock(&ss->lock);
	return rc;
}

static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
{
	spin_lock(&ss->lock);
	/* busy-poll must not have grabbed the slice while NAPI held it */
	WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
	ss->state = SLICE_STATE_IDLE;
	spin_unlock(&ss->lock);
}

/* Try to take the slice for busy-poll; returns false if NAPI owns it.
 * Uses the _bh lock variants because busy-poll runs in process context. */
static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
{
	bool rc = true;
	spin_lock_bh(&ss->lock);
	if ((ss->state & SLICE_LOCKED)) {
		ss->state |= SLICE_STATE_POLL_YIELD;
		rc = false;
		ss->lock_poll_yield++;
	} else
		ss->state |= SLICE_STATE_POLL;
	spin_unlock_bh(&ss->lock);
	return rc;
}

static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
{
	spin_lock_bh(&ss->lock);
	WARN_ON((ss->state & SLICE_STATE_NAPI));
	ss->state = SLICE_STATE_IDLE;
	spin_unlock_bh(&ss->lock);
}

/* True if the current owner of the slice is the busy-poll path.
 * Caller must already hold the slice (hence the WARN_ON). */
static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
{
	WARN_ON(!(ss->state & SLICE_LOCKED));
	return (ss->state & SLICE_USER_PEND);
}
#else /* CONFIG_NET_RX_BUSY_POLL */

/* No-op stubs when busy-polling is compiled out: NAPI always wins. */
static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
{
}

static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
{
	return false;
}

static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
{
}

static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
{
	return false;
}

static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
{
}

static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
{
	return false;
}
#endif
/*
 * Reset the NIC and re-establish all driver/firmware shared state:
 * interrupt queues per slice, IRQ claim/deassert offsets, interrupt
 * coalescing, optional DCA tags, ring counters, MAC address, pause
 * and multicast settings.  The command ordering below is mandated by
 * the firmware (see the comments inline).  Returns 0 or a negative
 * errno from the failing firmware command.
 */
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);

	/*
	 * Use non-ndis mcp_slot (eg, 4 bytes total,
	 * no toeplitz hash value returned.  Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts  */
	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */
	if (mgp->num_slices > 1) {
		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */
		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");

			return status;
		}
	}
	/* give each slice's interrupt queue DMA address to the firmware */
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	/* map per-slice IRQ claim registers (8 bytes apart in SRAM) */
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	/* DCA tags are optional; leave them NULL if the fw lacks them */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif				/* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */
	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}
#ifdef CONFIG_MYRI10GE_DCA
/*
 * Set PCIe relaxed-ordering to @on (0 or 1) and return the previous
 * setting.  The ">> 4" / "<< 4" match the bit position of
 * PCI_EXP_DEVCTL_RELAX_EN (bit 4) in the Device Control register.
 */
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret;
	u16 ctl;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);

	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}
/* Cache and write a new DCA tag to the slice's SRAM tag register.
 * NOTE(review): the @cpu parameter is currently unused here; callers
 * update ss->cpu themselves. */
static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}
/*
 * Refresh the slice's DCA tag if this slice is now being serviced by
 * a different CPU.  get_cpu()/put_cpu() pin us to the current CPU
 * while we read its tag.
 */
static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		/* only touch the MMIO register when the tag changed */
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}
/*
 * Register with the DCA subsystem and prime every slice's tag.
 * No-ops if the firmware exposed no DCA tags, DCA is already on,
 * or the administrator disabled it via the myri10ge_dca parameter.
 * Also disables PCIe relaxed ordering (restored at teardown).
 */
static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		/* -ENODEV just means no DCA provider is present */
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		/* force a tag refresh on the next update */
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}
/*
 * Undo myri10ge_setup_dca(): restore relaxed ordering if we had
 * disabled it and deregister from the DCA subsystem.  Safe to call
 * when DCA was never enabled.
 */
static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct device *dev = &mgp->pdev->dev;

	if (!mgp->dca_enabled)
		return;

	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(mgp->pdev, 1);
	dca_remove_requester(dev);
}
/*
 * DCA provider notification callback: enable or tear down DCA on
 * this device as providers come and go.  Always returns 0.
 */
static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		myri10ge_setup_dca(mgp);
		break;
	case DCA_PROVIDER_REMOVE:
		myri10ge_teardown_dca(mgp);
		break;
	}
	return 0;
}
#endif /* CONFIG_MYRI10GE_DCA */
/*
 * Post 8 receive descriptors to the NIC in one burst.  The first
 * descriptor's addr_low is temporarily replaced with an all-ones
 * pattern so the firmware does not act on the batch until the final
 * put_be32() writes the real low address — the barriers enforce that
 * ordering on the MMIO writes.
 */
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}
/*
 * For a VLAN-tagged IPv4/IPv6 frame, install the hardware checksum
 * on the skb and mark it CHECKSUM_COMPLETE.  Other frames are left
 * untouched.
 */
static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	const struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
	__be16 inner;

	if (skb->protocol != htons(ETH_P_8021Q))
		return;

	inner = vh->h_vlan_encapsulated_proto;
	if (inner != htons(ETH_P_IP) && inner != htons(ETH_P_IPV6))
		return;

	skb->csum = hw_csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
}
/*
 * Refill a receive ring with page fragments of @bytes each.  Pages of
 * MYRI10GE_ALLOC_SIZE are carved into consecutive fragments; each
 * fragment takes a page reference and descriptors are pushed to the
 * NIC 8 at a time via myri10ge_submit_8rx().  On allocation or DMA
 * mapping failure with a nearly-empty ring, watchdog_needed is set so
 * the watchdog (@watchdog caller) retries later.
 */
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	dma_addr_t bus;
	int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
	int end_offset;
#endif

	/* only the watchdog may refill once it has been armed */
	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}

			bus = pci_map_page(mgp->pdev, page, 0,
					   MYRI10GE_ALLOC_SIZE,
					   PCI_DMA_FROMDEVICE);
			if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
				__free_pages(page, MYRI10GE_ALLOC_ORDER);
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}

			rx->page = page;
			rx->page_offset = 0;
			rx->bus = bus;
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		end_offset = rx->page_offset + bytes - 1;
		if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
			rx->page_offset = end_offset & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
					    &rx->shadow[idx - 7]);
		}
	}
}
static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
struct myri10ge_rx_buffer_state *info, int bytes)
{
/* unmap the recvd page if we're the only or last user of it */
if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
(info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
& ~(MYRI10GE_ALLOC_SIZE - 1)),
MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
}
/*
* GRO does not support acceleration of tagged vlan frames, and
* this NIC does not support vlan tag offload, so we must pop
* the tag ourselves to be able to achieve GRO performance that
* is comparable to LRO.
*/
static inline void
myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
	u8 *va;
	struct vlan_ethhdr *veh;
	struct skb_frag_struct *frag;
	__wsum vsum;

	va = addr;
	va += MXGEFW_PAD;	/* skip firmware-inserted frame padding */
	veh = (struct vlan_ethhdr *)va;
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* fixup csum if needed: subtract the 4 tag bytes we are
		 * about to remove from the CHECKSUM_COMPLETE value */
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
			skb->csum = csum_sub(skb->csum, vsum);
		}
		/* pop tag */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
		/* slide the MAC addresses over the removed tag */
		memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
		skb->len -= VLAN_HLEN;
		skb->data_len -= VLAN_HLEN;
		frag = skb_shinfo(skb)->frags;
		frag->page_offset += VLAN_HLEN;
		skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
	}
}
#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
/*
 * Hand one received frame of @len bytes (checksum @csum from the NIC)
 * up the stack.  The frame lives in one or more page fragments of the
 * small or big rx ring (chosen by size).  In the busy-poll path an skb
 * is allocated and the headers are copied into its linear area; in the
 * NAPI path the frags are attached to a GRO skb.  Returns 1 if a
 * packet was delivered, 0 if it had to be dropped (skb alloc failure).
 */
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct skb_frag_struct *rx_frags;
	struct myri10ge_rx_buf *rx;
	int i, idx, remainder, bytes;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;
	bool polling;

	/* small frames come from the small-buffer ring */
	if (len <= mgp->small_bytes) {
		rx = &ss->rx_small;
		bytes = mgp->small_bytes;
	} else {
		rx = &ss->rx_big;
		bytes = mgp->big_bytes;
	}

	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);

	/* When busy polling in user context, allocate skb and copy headers to
	 * skb's linear memory ourselves.  When not busy polling, use the napi
	 * gro api.
	 */
	polling = myri10ge_ss_busy_polling(ss);
	if (polling)
		skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
	else
		skb = napi_get_frags(&ss->napi);
	if (unlikely(skb == NULL)) {
		ss->stats.rx_dropped++;
		/* still consume the ring entries and drop the pages */
		for (i = 0, remainder = len; remainder > 0; i++) {
			myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
			put_page(rx->info[idx].page);
			rx->cnt++;
			idx = rx->cnt & rx->mask;
			remainder -= MYRI10GE_ALLOC_SIZE;
		}
		return 0;
	}
	rx_frags = skb_shinfo(skb)->frags;
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		skb_fill_page_desc(skb, i, rx->info[idx].page,
				   rx->info[idx].page_offset,
				   remainder < MYRI10GE_ALLOC_SIZE ?
				   remainder : MYRI10GE_ALLOC_SIZE);
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	/* remove padding */
	rx_frags[0].page_offset += MXGEFW_PAD;
	rx_frags[0].size -= MXGEFW_PAD;
	len -= MXGEFW_PAD;

	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	if (dev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum;
	}
	myri10ge_vlan_rx(mgp->dev, va, skb);
	skb_record_rx_queue(skb, ss - &mgp->ss[0]);
	skb_mark_napi_id(skb, &ss->napi);

	if (polling) {
		int hlen;

		/* myri10ge_vlan_rx might have moved the header, so compute
		 * length and address again.
		 */
		hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
		va = page_address(skb_frag_page(&rx_frags[0])) +
		    rx_frags[0].page_offset;
		/* Copy header into the skb linear memory */
		skb_copy_to_linear_data(skb, va, hlen);
		rx_frags[0].page_offset += hlen;
		rx_frags[0].size -= hlen;
		skb->data_len -= hlen;
		skb->tail += hlen;
		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
	}
	else
		napi_gro_frags(&ss->napi);

	return 1;
}
/*
 * Reclaim transmit descriptors up to the firmware's reported
 * completion index @mcp_index: free skbs, unmap DMA buffers, update
 * stats, then tell the NIC to stop polling an idle queue and wake the
 * netdev queue if it was stopped and has drained enough.
 */
static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
	struct pci_dev *pdev = ss->mgp->pdev;
	struct myri10ge_tx_buf *tx = &ss->tx;
	struct netdev_queue *dev_queue;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		/* 'last' marks the final descriptor of a packet */
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			/* descriptors holding the skb were mapped with
			 * pci_map_single; frag descriptors with map_page */
			ss->stats.tx_bytes += skb->len;
			ss->stats.tx_packets++;
			dev_kfree_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
	/*
	 * Make a minimal effort to prevent the NIC from polling an
	 * idle tx queue.  If we can't get the lock we leave the queue
	 * active. In this case, either a thread was about to start
	 * using the queue anyway, or we lost a race and the NIC will
	 * waste some of its resources polling an inactive queue for a
	 * while.
	 */
	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
	    __netif_tx_trylock(dev_queue)) {
		if (tx->req == tx->done) {
			tx->queue_active = 0;
			put_be32(htonl(1), tx->send_stop);
			mb();
			mmiowb();
		}
		__netif_tx_unlock(dev_queue);
	}

	/* start the queue if we've stopped it */
	if (netif_tx_queue_stopped(dev_queue) &&
	    tx->req - tx->done < (tx->mask >> 1) &&
	    ss->mgp->running == MYRI10GE_ETH_RUNNING) {
		tx->wake_queue++;
		netif_tx_wake_queue(dev_queue);
	}
}
/*
 * Drain up to @budget completed receives from the slice's interrupt
 * queue, delivering each via myri10ge_rx_done(), then restock the rx
 * rings if they have drained below myri10ge_fill_thresh.
 * Returns the number of entries processed.
 */
static inline int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
	struct myri10ge_rx_done *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;
	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	/* a non-zero length marks a valid, firmware-written entry */
	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;	/* consume the entry */
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		rx_ok = myri10ge_rx_done(ss, length, checksum);
		rx_packets += rx_ok;
		rx_bytes += rx_ok * (unsigned long)length;
		cnt++;
		idx = cnt & (mgp->max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	ss->stats.rx_packets += rx_packets;
	ss->stats.rx_bytes += rx_bytes;

	/* restock receive rings if needed */
	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);

	return work_done;
}
/*
 * Process the firmware's DMA'ed statistics block (slice 0 only):
 * propagate link up/down transitions to the net stack, warn about
 * RDMA tag exhaustion, and wake anyone waiting on link-down in
 * mgp->down_wq.
 */
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
	struct mcp_irq_data *stats = mgp->ss[0].fw_stats;

	if (unlikely(stats->stats_updated)) {
		unsigned link_up = ntohl(stats->link_up);

		if (mgp->link_state != link_up) {
			mgp->link_state = link_up;

			if (mgp->link_state == MXGEFW_LINK_UP) {
				netif_info(mgp, link, mgp->dev, "link up\n");
				netif_carrier_on(mgp->dev);
				mgp->link_changes++;
			} else {
				/* MXGEFW_LINK_MYRINET means the port came up
				 * in Myrinet rather than Ethernet mode */
				netif_info(mgp, link, mgp->dev, "link %s\n",
					   (link_up == MXGEFW_LINK_MYRINET ?
					    "mismatch (Myrinet detected)" :
					    "down"));
				netif_carrier_off(mgp->dev);
				mgp->link_changes++;
			}
		}
		if (mgp->rdma_tags_available !=
		    ntohl(stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(stats->rdma_tags_available);
			netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
				    mgp->rdma_tags_available);
		}
		mgp->down_cnt += stats->link_down;
		if (stats->link_down)
			wake_up(&mgp->down_wq);
	}
}
/*
 * NAPI poll handler: drain up to @budget receive completions from the
 * slice's rx_done ring.  Returns the number of packets processed;
 * when that is less than @budget, NAPI is completed and the slice's
 * irq_claim register is written (which re-arms the interrupt --
 * presumably; the register semantics are firmware-defined).
 */
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	int work_done;

#ifdef CONFIG_MYRI10GE_DCA
	if (ss->mgp->dca_enabled)
		myri10ge_update_dca(ss);
#endif
	/* Try later if the busy_poll handler is running. */
	if (!myri10ge_ss_lock_napi(ss))
		return budget;

	/* process as many rx events as NAPI will allow */
	work_done = myri10ge_clean_rx_done(ss, budget);
	myri10ge_ss_unlock_napi(ss);

	if (work_done < budget) {
		napi_complete(napi);
		put_be32(htonl(3), ss->irq_claim);
	}
	return work_done;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/*
 * Low-latency busy-poll handler.  Processes a small batch of rx
 * completions outside NAPI context, guarded by the per-slice poll
 * lock so it cannot run concurrently with myri10ge_poll().
 * Returns the packet count, or LL_FLUSH_FAILED / LL_FLUSH_BUSY when
 * polling is not currently possible.
 */
static int myri10ge_busy_poll(struct napi_struct *napi)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	struct myri10ge_priv *mgp = ss->mgp;
	int work_done;

	/* Poll only when the link is up */
	if (mgp->link_state != MXGEFW_LINK_UP)
		return LL_FLUSH_FAILED;

	/* Give up (rather than spin) if NAPI currently owns the slice */
	if (!myri10ge_ss_lock_poll(ss))
		return LL_FLUSH_BUSY;

	/* Process a small number of packets */
	work_done = myri10ge_clean_rx_done(ss, 4);
	if (work_done)
		ss->busy_poll_cnt += work_done;
	else
		ss->busy_poll_miss++;
	myri10ge_ss_unlock_poll(ss);
	return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
 * Interrupt handler.  With MSI-X there is one vector per slice;
 * otherwise a single (possibly shared) vector serves slice 0.
 * Schedules NAPI for receive work, reaps transmit completions
 * inline, and spins until the firmware clears stats->valid before
 * claiming the interrupt.
 */
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_priv *mgp = ss->mgp;
	struct mcp_irq_data *stats = ss->fw_stats;
	struct myri10ge_tx_buf *tx = &ss->tx;
	u32 send_done_count;
	int i;

	/* an interrupt on a non-zero receive-only slice is implicitly
	 * valid since MSI-X irqs are not shared */
	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
		napi_schedule(&ss->napi);
		return IRQ_HANDLED;
	}

	/* make sure it is our IRQ, and that the DMA has finished */
	if (unlikely(!stats->valid))
		return IRQ_NONE;

	/* low bit indicates receives are present, so schedule
	 * napi poll handler */
	if (stats->valid & 1)
		napi_schedule(&ss->napi);

	if (!mgp->msi_enabled && !mgp->msix_enabled) {
		/* INTx: ask the NIC to deassert the line; valid is left
		 * set (unless myri10ge_deassert_wait is off) so the loop
		 * below can wait for the firmware's confirmation */
		put_be32(0, mgp->irq_deassert);
		if (!myri10ge_deassert_wait)
			stats->valid = 0;
		mb();
	} else
		stats->valid = 0;

	/* Wait for IRQ line to go low, if using INTx */
	i = 0;
	while (1) {
		i++;
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(ss, (int)send_done_count);
		if (unlikely(i > myri10ge_max_irq_loops)) {
			/* firmware never cleared valid: give up and let
			 * the watchdog work deal with the device */
			netdev_warn(mgp->dev, "irq stuck?\n");
			stats->valid = 0;
			schedule_work(&mgp->watchdog_work);
		}
		if (likely(stats->valid == 0))
			break;
		cpu_relax();
		barrier();
	}

	/* Only slice 0 updates stats */
	if (ss == mgp->ss)
		myri10ge_check_statblock(mgp);

	put_be32(htonl(3), ss->irq_claim + 1);
	return IRQ_HANDLED;
}
/*
 * ethtool get_settings: the link is always reported as 10GbE, full
 * duplex, no autonegotiation.  The connector and transceiver type are
 * derived from the character following the third '-' in the driver's
 * cached copy of the EEPROM product code string.
 */
static int
myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);
	char *code;
	int dash;

	cmd->autoneg = AUTONEG_DISABLE;
	ethtool_cmd_speed_set(cmd, SPEED_10000);
	cmd->duplex = DUPLEX_FULL;

	code = priv->product_code_string;
	if (code == NULL) {
		netdev_err(netdev, "Missing product code\n");
		return 0;
	}

	/* advance just past the third dash of the product code */
	for (dash = 0; dash < 3; dash++, code++) {
		code = strchr(code, '-');
		if (code == NULL) {
			netdev_err(netdev, "Invalid product code %s\n",
				   priv->product_code_string);
			return 0;
		}
	}
	if (*code == '2')
		code++;
	if (*code == 'R' || *code == 'Q' || *code == 'S') {
		/* We've found either an XFP, quad ribbon fiber, or SFP+ */
		cmd->port = PORT_FIBRE;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else {
		cmd->port = PORT_OTHER;
	}
	if (*code == 'R' || *code == 'S')
		cmd->transceiver = XCVR_EXTERNAL;
	else
		cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
/* ethtool get_drvinfo: report driver, firmware and PCI bus identity */
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
	strlcpy(info->fw_version, priv->fw_version, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}
/* ethtool get_coalesce: only the rx interrupt delay is tunable */
static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	coal->rx_coalesce_usecs = priv->intr_coal_delay;
	return 0;
}
/* ethtool set_coalesce: store the new rx interrupt delay and push it
 * to the NIC through the firmware-provided delay register */
static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	priv->intr_coal_delay = coal->rx_coalesce_usecs;
	put_be32(htonl(priv->intr_coal_delay), priv->intr_coal_delay_ptr);
	return 0;
}
/* ethtool get_pauseparam: rx and tx pause share a single setting and
 * pause autonegotiation is never used */
static void
myri10ge_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	pause->autoneg = 0;
	pause->rx_pause = priv->pause;
	pause->tx_pause = priv->pause;
}
/*
 * ethtool set_pauseparam: the NIC supports only a single symmetric
 * pause setting, so both tx_pause and rx_pause requests map onto
 * mgp->pause via myri10ge_change_pause().
 *
 * Fix: reject unsupported autoneg requests *before* touching the
 * hardware.  Previously the autoneg check ran last, so a request
 * with autoneg set could still change the device's pause state
 * before being rejected.
 */
static int
myri10ge_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	/* pause autonegotiation is not supported; validate first so an
	 * invalid request has no side effects */
	if (pause->autoneg != 0)
		return -EINVAL;
	if (pause->tx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->rx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->rx_pause);
	return 0;
}
/* ethtool get_ringparam: ring sizes are fixed, so current and maximum
 * pending counts are reported as identical */
static void
myri10ge_get_ringparam(struct net_device *netdev,
		       struct ethtool_ringparam *ring)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	ring->rx_mini_max_pending = priv->ss[0].rx_small.mask + 1;
	ring->rx_max_pending = priv->ss[0].rx_big.mask + 1;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = priv->ss[0].tx.mask + 1;
	ring->rx_mini_pending = ring->rx_mini_max_pending;
	ring->rx_pending = ring->rx_max_pending;
	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}
/* Names of the ethtool statistics.  The value order written by
 * myri10ge_get_ethtool_stats() must match this string order exactly. */
static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_boundary", "irq", "MSI", "MSIX",
	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
	"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
	"dca_capable_firmware", "dca_device_present",
#endif
	"link_changes", "link_up", "dropped_link_overflow",
	"dropped_link_error_or_filtered",
	"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
	"dropped_unicast_filtered", "dropped_multicast_filtered",
	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
	"dropped_no_big_buffer"
};

/* Per-slice statistics names, repeated once per slice in the output */
static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
	"----------- slice ---------",
	"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
	"rx_small_cnt", "rx_big_cnt",
	"wake_queue", "stop_queue", "tx_linearized",
#ifdef CONFIG_NET_RX_BUSY_POLL
	"rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
	"rx_busy_poll_cnt",
#endif
};

/* number of u64 fields copied verbatim from struct rtnl_link_stats64 */
#define MYRI10GE_NET_STATS_LEN 21
#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
/* ethtool get_strings: emit the main stat names followed by one copy
 * of the slice stat names per slice */
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);
	int slice;

	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, *myri10ge_gstrings_main_stats,
	       sizeof(myri10ge_gstrings_main_stats));
	data += sizeof(myri10ge_gstrings_main_stats);
	for (slice = 0; slice < priv->num_slices; slice++) {
		memcpy(data, *myri10ge_gstrings_slice_stats,
		       sizeof(myri10ge_gstrings_slice_stats));
		data += sizeof(myri10ge_gstrings_slice_stats);
	}
}
/* ethtool get_sset_count: total statistics count is the main stats
 * plus one slice group per active slice */
static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return MYRI10GE_MAIN_STATS_LEN +
	       priv->num_slices * MYRI10GE_SLICE_STATS_LEN;
}
/*
 * ethtool get_ethtool_stats: fill @data with the values named in
 * myri10ge_gstrings_main_stats, followed by one
 * myri10ge_gstrings_slice_stats group per slice.  The write order
 * here must match those string tables exactly.
 */
static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	struct myri10ge_slice_state *ss;
	struct rtnl_link_stats64 link_stats;
	int slice;
	int i;

	/* force stats update */
	memset(&link_stats, 0, sizeof(link_stats));
	(void)myri10ge_get_stats(netdev, &link_stats);
	/* the first MYRI10GE_NET_STATS_LEN u64s come straight out of
	 * struct rtnl_link_stats64 */
	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
		data[i] = ((u64 *)&link_stats)[i];

	/* device-specific stats */
	data[i++] = (unsigned int)mgp->tx_boundary;
	data[i++] = (unsigned int)mgp->pdev->irq;
	data[i++] = (unsigned int)mgp->msi_enabled;
	data[i++] = (unsigned int)mgp->msix_enabled;
	data[i++] = (unsigned int)mgp->read_dma;
	data[i++] = (unsigned int)mgp->write_dma;
	data[i++] = (unsigned int)mgp->read_write_dma;
	data[i++] = (unsigned int)mgp->serial_number;
	data[i++] = (unsigned int)mgp->watchdog_resets;
#ifdef CONFIG_MYRI10GE_DCA
	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
	data[i++] = (unsigned int)(mgp->dca_enabled);
#endif
	data[i++] = (unsigned int)mgp->link_changes;

	/* firmware stats are useful only in the first slice */
	ss = &mgp->ss[0];
	data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);

	/* per-slice counters, one group per slice */
	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];
		data[i++] = slice;
		data[i++] = (unsigned int)ss->tx.pkt_start;
		data[i++] = (unsigned int)ss->tx.pkt_done;
		data[i++] = (unsigned int)ss->tx.req;
		data[i++] = (unsigned int)ss->tx.done;
		data[i++] = (unsigned int)ss->rx_small.cnt;
		data[i++] = (unsigned int)ss->rx_big.cnt;
		data[i++] = (unsigned int)ss->tx.wake_queue;
		data[i++] = (unsigned int)ss->tx.stop_queue;
		data[i++] = (unsigned int)ss->tx.linearized;
#ifdef CONFIG_NET_RX_BUSY_POLL
		data[i++] = ss->lock_napi_yield;
		data[i++] = ss->lock_poll_yield;
		data[i++] = ss->busy_poll_miss;
		data[i++] = ss->busy_poll_cnt;
#endif
	}
}
/* ethtool set_msglevel: store the netif message-enable bitmask */
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}
/* ethtool get_msglevel: return the netif message-enable bitmask */
static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}
/*
 * Use a low-level command to change the LED behavior. Rather than
 * blinking (which is the normal case), when identify is used, the
 * yellow LED turns solid.  The pattern is written directly into the
 * running firmware's led_pattern field in NIC SRAM.
 */
static int myri10ge_led(struct myri10ge_priv *mgp, int on)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	size_t hdr_off, pattern_off, hdr_len;
	u32 pattern = 0xfffffffe;

	/* find running firmware header */
	hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
	if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_off);
		return -EIO;
	}

	/* led_pattern must lie within the advertised header length,
	 * otherwise this firmware predates LED identification */
	hdr_len = swab32(readl(mgp->sram + hdr_off +
			       offsetof(struct mcp_gen_header, header_length)));
	pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
	if (pattern_off >= (hdr_len + hdr_off)) {
		dev_info(dev, "Firmware does not support LED identification\n");
		return -EINVAL;
	}
	if (!on)
		/* read the pattern stored 4 bytes past led_pattern --
		 * presumably the firmware's default blink pattern;
		 * TODO confirm against the MCP header layout */
		pattern = swab32(readl(mgp->sram + pattern_off + 4));
	writel(swab32(pattern), mgp->sram + pattern_off);
	return 0;
}
/* ethtool set_phys_id: turn the identify LED on or off via
 * myri10ge_led(); other states are not supported */
static int
myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct myri10ge_priv *priv = netdev_priv(netdev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return myri10ge_led(priv, 1);
	case ETHTOOL_ID_INACTIVE:
		return myri10ge_led(priv, 0);
	default:
		return -EINVAL;
	}
}
/* ethtool method table for the driver */
static const struct ethtool_ops myri10ge_ethtool_ops = {
	.get_settings = myri10ge_get_settings,
	.get_drvinfo = myri10ge_get_drvinfo,
	.get_coalesce = myri10ge_get_coalesce,
	.set_coalesce = myri10ge_set_coalesce,
	.get_pauseparam = myri10ge_get_pauseparam,
	.set_pauseparam = myri10ge_set_pauseparam,
	.get_ringparam = myri10ge_get_ringparam,
	.get_link = ethtool_op_get_link,
	.get_strings = myri10ge_get_strings,
	.get_sset_count = myri10ge_get_sset_count,
	.get_ethtool_stats = myri10ge_get_ethtool_stats,
	.set_msglevel = myri10ge_set_msglevel,
	.get_msglevel = myri10ge_get_msglevel,
	.set_phys_id = myri10ge_phys_id,
};
/*
 * Allocate and populate the host-side rings for one slice: the tx
 * shadow request list, the small/big rx shadow rings, the per-entry
 * info arrays, and the initial receive page pool.  Ring sizes come
 * from the firmware.  On failure, already-acquired resources are
 * released in reverse order via the goto chain and a negative errno
 * is returned.
 */
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct myri10ge_cmd cmd;
	struct net_device *dev = mgp->dev;
	int tx_ring_size, rx_ring_size;
	int tx_ring_entries, rx_ring_entries;
	int i, slice, status;
	size_t bytes;

	/* get ring sizes */
	slice = ss - mgp->ss;
	cmd.data0 = slice;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
	tx_ring_size = cmd.data0;
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0)
		return status;
	rx_ring_size = cmd.data0;

	/* ring sizes are in bytes; masks are entries-1 (sizes are
	 * powers of two) */
	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
	ss->tx.mask = tx_ring_entries - 1;
	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;

	status = -ENOMEM;

	/* allocate the host shadow rings */
	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    * sizeof(*ss->tx.req_list);
	ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.req_bytes == NULL)
		goto abort_with_nothing;

	/* ensure req_list entries are aligned to 8 bytes */
	ss->tx.req_list = (struct mcp_kreq_ether_send *)
	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
	ss->tx.queue_active = 0;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.shadow == NULL)
		goto abort_with_tx_req_bytes;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
	ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.shadow == NULL)
		goto abort_with_rx_small_shadow;

	/* allocate the host info rings */
	bytes = tx_ring_entries * sizeof(*ss->tx.info);
	ss->tx.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.info == NULL)
		goto abort_with_rx_big_shadow;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
	ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.info == NULL)
		goto abort_with_tx_info;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
	ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.info == NULL)
		goto abort_with_rx_small_info;

	/* Fill the receive rings */
	ss->rx_big.cnt = 0;
	ss->rx_small.cnt = 0;
	ss->rx_big.fill_cnt = 0;
	ss->rx_small.fill_cnt = 0;
	ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_small.watchdog_needed = 0;
	ss->rx_big.watchdog_needed = 0;
	if (mgp->small_bytes == 0) {
		/* small_bytes == 0: no separate small ring to fill */
		ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
	} else {
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	}

	if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
			   slice, ss->rx_small.fill_cnt);
		goto abort_with_rx_small_ring;
	}

	myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
	if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
			   slice, ss->rx_big.fill_cnt);
		goto abort_with_rx_big_ring;
	}

	return 0;

abort_with_rx_big_ring:
	/* release the rx pages allocated so far, unmapping each first */
	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		int idx = i & ss->rx_big.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

abort_with_rx_small_ring:
	if (mgp->small_bytes == 0)
		ss->rx_small.fill_cnt = ss->rx_small.cnt;
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		int idx = i & ss->rx_small.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}

	kfree(ss->rx_big.info);

abort_with_rx_small_info:
	kfree(ss->rx_small.info);

abort_with_tx_info:
	kfree(ss->tx.info);

abort_with_rx_big_shadow:
	kfree(ss->rx_big.shadow);

abort_with_rx_small_shadow:
	kfree(ss->rx_small.shadow);

abort_with_tx_req_bytes:
	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;

abort_with_nothing:
	return status;
}
/*
 * Release everything myri10ge_allocate_rings() set up for a slice:
 * outstanding rx pages, any tx buffers still in flight (these are
 * dropped, with their DMA mappings undone), and the shadow/info
 * arrays.  Safe to call on a slice whose rings were never allocated
 * (req_list == NULL).
 */
static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct myri10ge_tx_buf *tx;
	int i, len, idx;

	/* If not allocated, skip it */
	if (ss->tx.req_list == NULL)
		return;

	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		idx = i & ss->rx_big.mask;
		if (i == ss->rx_big.fill_cnt - 1)
			/* force the last partially-used page to be freed */
			ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

	if (mgp->small_bytes == 0)
		ss->rx_small.fill_cnt = ss->rx_small.cnt;
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		idx = i & ss->rx_small.mask;
		if (i == ss->rx_small.fill_cnt - 1)
			ss->rx_small.info[idx].page_offset =
			    MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}

	/* drop any tx buffers that never completed */
	tx = &ss->tx;
	while (tx->done != tx->req) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			/* descriptors with an skb were mapped with
			 * pci_map_single; later fragments with pci_map_page */
			ss->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			if (len)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	kfree(ss->rx_big.info);

	kfree(ss->rx_small.info);

	kfree(ss->tx.info);

	kfree(ss->rx_big.shadow);

	kfree(ss->rx_small.shadow);

	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;
}
/*
 * Set up interrupt delivery for the device.  Preference order when
 * myri10ge_msi is enabled: MSI-X with one vector per slice (required
 * when num_slices > 1), then plain MSI, then legacy INTx.  Registers
 * myri10ge_intr for each vector and unwinds already-registered
 * vectors on failure.  Returns 0 or a negative errno.
 */
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct myri10ge_slice_state *ss;
	struct net_device *netdev = mgp->dev;
	int i;
	int status;

	mgp->msi_enabled = 0;
	mgp->msix_enabled = 0;
	status = 0;
	if (myri10ge_msi) {
		if (mgp->num_slices > 1) {
			/* multi-slice operation requires exactly one
			 * vector per slice */
			status = pci_enable_msix_range(pdev, mgp->msix_vectors,
					mgp->num_slices, mgp->num_slices);
			if (status < 0) {
				dev_err(&pdev->dev,
					"Error %d setting up MSI-X\n", status);
				return status;
			}
			mgp->msix_enabled = 1;
		}
		if (mgp->msix_enabled == 0) {
			status = pci_enable_msi(pdev);
			if (status != 0) {
				dev_err(&pdev->dev,
					"Error %d setting up MSI; falling back to xPIC\n",
					status);
			} else {
				mgp->msi_enabled = 1;
			}
		}
	}
	if (mgp->msix_enabled) {
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			snprintf(ss->irq_desc, sizeof(ss->irq_desc),
				 "%s:slice-%d", netdev->name, i);
			status = request_irq(mgp->msix_vectors[i].vector,
					     myri10ge_intr, 0, ss->irq_desc,
					     ss);
			if (status != 0) {
				/* free the vectors registered so far */
				dev_err(&pdev->dev,
					"slice %d failed to allocate IRQ\n", i);
				i--;
				while (i >= 0) {
					free_irq(mgp->msix_vectors[i].vector,
						 &mgp->ss[i]);
					i--;
				}
				pci_disable_msix(pdev);
				return status;
			}
		}
	} else {
		/* single vector (MSI or INTx); INTx may be shared */
		status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
				     mgp->dev->name, &mgp->ss[0]);
		if (status != 0) {
			dev_err(&pdev->dev, "failed to allocate IRQ\n");
			if (mgp->msi_enabled)
				pci_disable_msi(pdev);
		}
	}
	return status;
}
/* Tear down whatever myri10ge_request_irq() set up: release every
 * registered vector, then disable MSI / MSI-X as appropriate. */
static void myri10ge_free_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int slice;

	if (mgp->msix_enabled) {
		for (slice = 0; slice < mgp->num_slices; slice++)
			free_irq(mgp->msix_vectors[slice].vector,
				 &mgp->ss[slice]);
	} else {
		free_irq(pdev->irq, &mgp->ss[0]);
	}
	if (mgp->msi_enabled)
		pci_disable_msi(pdev);
	if (mgp->msix_enabled)
		pci_disable_msix(pdev);
}
/*
 * Query the firmware for this slice's ring locations in NIC SRAM and
 * cache iomem pointers to them, along with the per-slice send_go /
 * send_stop registers.  Only slice 0 has a tx ring unless the device
 * exposes multiple tx queues.  Returns 0, or the OR of the failed
 * command statuses.
 */
static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	status = 0;
	if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
		cmd.data0 = slice;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
					   &cmd, 0);
		ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
		    (mgp->sram + cmd.data0);
	}
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
				    &cmd, 0);
	ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
	ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);

	/* per-slice send control registers are spaced 64 bytes apart */
	ss->tx.send_go = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
	ss->tx.send_stop = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
	return status;
}
/*
 * Tell the firmware where to DMA its interrupt/statistics block for
 * @slice.  If the firmware lacks MXGEFW_CMD_SET_STATS_DMA_V2
 * (-ENOSYS), fall back to the obsolete single-slice command, which
 * also means multicast is unsupported.
 *
 * NOTE(review): failures other than -ENOSYS from the V2 command are
 * ignored -- fw_multicast_support is set and 0 is returned anyway.
 * The obsolete command's status is likewise never checked.  Confirm
 * this is intentional before relying on the return value.
 */
static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
	cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
	if (status == -ENOSYS) {
		dma_addr_t bus = ss->fw_stats_bus;
		if (slice != 0)
			return -EINVAL;
		/* the obsolete command takes the address of
		 * send_done_count rather than the whole block */
		bus += offsetof(struct mcp_irq_data, send_done_count);
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
		status = myri10ge_send_cmd(mgp,
					   MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
					   &cmd, 0);
		/* Firmware cannot support multicast without STATS_DMA_V2 */
		mgp->fw_multicast_support = 0;
	} else {
		mgp->fw_multicast_support = 1;
	}
	return 0;
}
/*
 * ndo_open: bring the interface up.  Resets the NIC, enables RSS
 * slices with an identity indirection table (when num_slices > 1),
 * sets up interrupts, picks small/big receive buffer sizes from the
 * MTU, allocates per-slice rings, programs buffer sizes and TSO mode
 * into the firmware, and finally brings the link up and starts the
 * watchdog timer and tx queues.
 */
static int myri10ge_open(struct net_device *dev)
{
	struct myri10ge_slice_state *ss;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int i, status, big_pow2, slice;
	u8 __iomem *itable;

	if (mgp->running != MYRI10GE_ETH_STOPPED)
		return -EBUSY;
	mgp->running = MYRI10GE_ETH_STARTING;
	status = myri10ge_reset(mgp);
	if (status != 0) {
		netdev_err(dev, "failed reset\n");
		goto abort_with_nothing;
	}

	if (mgp->num_slices > 1) {
		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to set number of slices\n");
			goto abort_with_nothing;
		}
		/* setup the indirection table */
		cmd.data0 = mgp->num_slices;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
					   &cmd, 0);
		status |= myri10ge_send_cmd(mgp,
					    MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
					    &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to setup rss tables\n");
			goto abort_with_nothing;
		}

		/* just enable an identity mapping */
		itable = mgp->sram + cmd.data0;
		for (i = 0; i < mgp->num_slices; i++)
			__raw_writeb(i, &itable[i]);

		cmd.data0 = 1;
		cmd.data1 = myri10ge_rss_hash;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to enable slices\n");
			goto abort_with_nothing;
		}
	}

	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_nothing;

	/* decide what small buffer size to use. For good TCP rx
	 * performance, it is important to not receive 1514 byte
	 * frames into jumbo buffers, as it confuses the socket buffer
	 * accounting code, leading to drops and erratic performance.
	 */
	if (dev->mtu <= ETH_DATA_LEN)
		/* enough for a TCP header */
		mgp->small_bytes = (128 > SMP_CACHE_BYTES)
		    ? (128 - MXGEFW_PAD)
		    : (SMP_CACHE_BYTES - MXGEFW_PAD);
	else
		/* enough for a vlan encapsulated ETH_DATA_LEN frame */
		mgp->small_bytes = VLAN_ETH_FRAME_LEN;

	/* Override the small buffer size? */
	if (myri10ge_small_bytes >= 0)
		mgp->small_bytes = myri10ge_small_bytes;

	/* Firmware needs the big buff size as a power of 2. Lie and
	 * tell him the buffer is larger, because we only use 1
	 * buffer/pkt, and the mtu will prevent overruns.
	 */
	big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
		while (!is_power_of_2(big_pow2))
			big_pow2++;
		mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	} else {
		big_pow2 = MYRI10GE_ALLOC_SIZE;
		mgp->big_bytes = big_pow2;
	}

	/* setup the per-slice data structures */
	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];

		status = myri10ge_get_txrx(mgp, slice);
		if (status != 0) {
			netdev_err(dev, "failed to get ring sizes or locations\n");
			goto abort_with_rings;
		}
		status = myri10ge_allocate_rings(ss);
		if (status != 0)
			goto abort_with_rings;

		/* only firmware which supports multiple TX queues
		 * supports setting up the tx stats on non-zero
		 * slices */
		if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
			status = myri10ge_set_stats(mgp, slice);
		if (status) {
			netdev_err(dev, "Couldn't set stats DMA\n");
			goto abort_with_rings;
		}

		/* Initialize the slice spinlock and state used for polling */
		myri10ge_ss_init_lock(ss);

		/* must happen prior to any irq */
		napi_enable(&(ss)->napi);
	}

	/* now give firmware buffers sizes, and MTU */
	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
	cmd.data0 = mgp->small_bytes;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
	cmd.data0 = big_pow2;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't set buffer sizes\n");
		goto abort_with_rings;
	}

	/*
	 * Set Linux style TSO mode; this is needed only on newer
	 * firmware versions. Older versions default to Linux
	 * style TSO
	 */
	cmd.data0 = 0;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
	if (status && status != -ENOSYS) {
		netdev_err(dev, "Couldn't set TSO mode\n");
		goto abort_with_rings;
	}

	mgp->link_state = ~0U;
	mgp->rdma_tags_available = 15;

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't bring up link\n");
		goto abort_with_rings;
	}

	mgp->running = MYRI10GE_ETH_RUNNING;
	mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
	add_timer(&mgp->watchdog_timer);
	netif_tx_wake_all_queues(dev);
	return 0;

abort_with_rings:
	/* disable napi only on slices where it was enabled */
	while (slice) {
		slice--;
		napi_disable(&mgp->ss[slice].napi);
	}
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	myri10ge_free_irq(mgp);

abort_with_nothing:
	/* NOTE(review): the specific error code in 'status' is discarded
	 * here; every failure path reports -ENOMEM to the caller */
	mgp->running = MYRI10GE_ETH_STOPPED;
	return -ENOMEM;
}
/*
 * ndo_close: bring the interface down.  Stops the watchdog, disables
 * NAPI and locks every slice against busy-polling, asks the firmware
 * to take the link down (waiting up to 1s for the down_cnt
 * acknowledgement bumped by myri10ge_check_statblock), then frees
 * IRQs and rings.
 */
static int myri10ge_close(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int status, old_down_cnt;
	int i;

	if (mgp->running != MYRI10GE_ETH_RUNNING)
		return 0;

	if (mgp->ss[0].tx.req_bytes == NULL)
		return 0;

	del_timer_sync(&mgp->watchdog_timer);
	mgp->running = MYRI10GE_ETH_STOPPING;
	local_bh_disable();	/* myri10ge_ss_lock_napi needs bh disabled */
	for (i = 0; i < mgp->num_slices; i++) {
		napi_disable(&mgp->ss[i].napi);
		/* Lock the slice to prevent the busy_poll handler from
		 * accessing it. Later when we bring the NIC up, myri10ge_open
		 * resets the slice including this lock.
		 */
		while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
			pr_info("Slice %d locked\n", i);
			mdelay(1);
		}
	}
	local_bh_enable();
	netif_carrier_off(dev);

	netif_tx_stop_all_queues(dev);
	if (mgp->rebooted == 0) {
		old_down_cnt = mgp->down_cnt;
		mb();
		status =
		    myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
		if (status)
			netdev_err(dev, "Couldn't bring down link\n");

		/* down_cnt is advanced by myri10ge_check_statblock()
		 * when the firmware reports link_down */
		wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
				   HZ);
		if (old_down_cnt == mgp->down_cnt)
			netdev_err(dev, "never got down irq\n");
	}
	netif_tx_disable(dev);
	myri10ge_free_irq(mgp);
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	mgp->running = MYRI10GE_ETH_STOPPED;
	return 0;
}
/*
 * Copy an array of struct mcp_kreq_ether_send to the NIC one request
 * at a time, in reverse order (entries cnt-1 down to 1), handling
 * ring wraps.  Entry 0 is deliberately left for the caller
 * (myri10ge_submit_req) to write last.
 */
static inline void
myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
			      struct mcp_kreq_ether_send *src, int cnt)
{
	int slot;

	for (slot = cnt - 1; slot >= 1; slot--) {
		myri10ge_pio_copy(&tx->lanai[(tx->req + slot) & tx->mask],
				  &src[slot], sizeof(*src));
		mb();	/* flush each request before the next */
	}
}
/*
 * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic. We re-write the first segment's flags
 * to mark them valid only after writing the entire chain.
 */
static inline void
myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
		    int cnt)
{
	int idx, i;
	struct mcp_kreq_ether_send __iomem *dstp, *dst;
	struct mcp_kreq_ether_send *srcp;
	u8 last_flags;

	idx = tx->req & tx->mask;

	/* clear the first request's flags so the chain is not acted on
	 * until it has been completely written */
	last_flags = src->flags;
	src->flags = 0;
	mb();
	dst = dstp = &tx->lanai[idx];
	srcp = src;

	if ((idx + cnt) < tx->mask) {
		/* no ring wrap: copy two requests (32 bytes) at a time */
		for (i = 0; i < (cnt - 1); i += 2) {
			myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
			mb();	/* force write every 32 bytes */
			srcp += 2;
			dstp += 2;
		}
	} else {
		/* submit all but the first request, and ensure
		 * that it is submitted below */
		myri10ge_submit_req_backwards(tx, src, cnt);
		i = 0;
	}
	if (i < cnt) {
		/* submit the first request */
		myri10ge_pio_copy(dstp, srcp, sizeof(*src));
		mb();	/* barrier before setting valid flag */
	}

	/* re-write the last 32-bits with the valid flags */
	src->flags = last_flags;
	put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
	tx->req += cnt;
	mb();
}
/*
 * Undo the DMA mappings of a partially-built tx request: walk the
 * descriptors from the start of the current request (tx->req) up to
 * and including @idx, unmapping each mapped fragment and clearing
 * its skb slot.
 */
static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
				  struct myri10ge_tx_buf *tx, int idx)
{
	unsigned int len;
	int last_idx;

	/* Free any DMA resources we've alloced and clear out the skb slot */
	last_idx = (idx + 1) & tx->mask;	/* one past the last slot to clean */
	idx = tx->req & tx->mask;	/* restart at the request's first slot */
	do {
		len = dma_unmap_len(&tx->info[idx], len);
		if (len) {
			/* slots holding the skb were mapped with
			 * pci_map_single; fragment slots with pci_map_page */
			if (tx->info[idx].skb != NULL)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
			dma_unmap_len_set(&tx->info[idx], len, 0);
			tx->info[idx].skb = NULL;
		}
		idx = (idx + 1) & tx->mask;
	} while (idx != last_idx);
}
/*
* Transmit a packet. We need to split the packet so that a single
* segment does not cross myri10ge->tx_boundary, so this makes segment
* counting tricky. So rather than try to count segments up front, we
* just give up if there are too few segments to hold a reasonably
* fragmented packet currently available. If we run
* out of segments while preparing a packet for DMA, we just linearize
* it and try again.
*/
/*
 * ndo_start_xmit handler.  Maps the skb for DMA, builds one firmware
 * send request per tx_boundary-sized piece of each fragment, and hands
 * the request list to the NIC.  For TSO packets it additionally tracks
 * where mss-sized segment boundaries fall so the firmware can cut the
 * stream correctly.  Returns NETDEV_TX_OK (packet consumed or dropped)
 * or NETDEV_TX_BUSY (ring full; the queue has been stopped).
 */
static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	struct mcp_kreq_ether_send *req;
	struct myri10ge_tx_buf *tx;
	struct skb_frag_struct *frag;
	struct netdev_queue *netdev_queue;
	dma_addr_t bus;
	u32 low;
	__be32 high_swapped;
	unsigned int len;
	int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
	u16 pseudo_hdr_offset, cksum_offset, queue;
	int cum_len, seglen, boundary, rdma_count;
	u8 flags, odd_flag;

	/* one tx ring per queue; the skb's queue mapping selects the slice */
	queue = skb_get_queue_mapping(skb);
	ss = &mgp->ss[queue];
	netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
	tx = &ss->tx;

again:
	req = tx->req_list;
	avail = tx->mask - 1 - (tx->req - tx->done);

	mss = 0;
	max_segments = MXGEFW_MAX_SEND_DESC;

	if (skb_is_gso(skb)) {
		mss = skb_shinfo(skb)->gso_size;
		max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
	}

	if ((unlikely(avail < max_segments))) {
		/* we are out of transmit resources */
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
		return NETDEV_TX_BUSY;
	}

	/* Setup checksum offloading, if needed */
	cksum_offset = 0;
	pseudo_hdr_offset = 0;
	odd_flag = 0;
	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cksum_offset = skb_checksum_start_offset(skb);
		pseudo_hdr_offset = cksum_offset + skb->csum_offset;
		/* If the headers are excessively large, then we must
		 * fall back to a software checksum */
		if (unlikely(!mss && (cksum_offset > 255 ||
				      pseudo_hdr_offset > 127))) {
			if (skb_checksum_help(skb))
				goto drop;
			cksum_offset = 0;
			pseudo_hdr_offset = 0;
		} else {
			odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
			flags |= MXGEFW_FLAGS_CKSUM;
		}
	}

	cum_len = 0;

	if (mss) {		/* TSO */
		/* this removes any CKSUM flag from before */
		flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);

		/* negative cum_len signifies to the
		 * send loop that we are still in the
		 * header portion of the TSO packet.
		 * TSO header can be at most 1KB long */
		cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));

		/* for IPv6 TSO, the checksum offset stores the
		 * TCP header length, to save the firmware from
		 * the need to parse the headers */
		if (skb_is_gso_v6(skb)) {
			cksum_offset = tcp_hdrlen(skb);
			/* Can only handle headers <= max_tso6 long */
			if (unlikely(-cum_len > mgp->max_tso6))
				return myri10ge_sw_tso(skb, dev);
		}
		/* for TSO, pseudo_hdr_offset holds mss.
		 * The firmware figures out where to put
		 * the checksum by parsing the header. */
		pseudo_hdr_offset = mss;
	} else
		/* Mark small packets, and pad out tiny packets */
		if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
			flags |= MXGEFW_FLAGS_SMALL;

			/* pad frames to at least ETH_ZLEN bytes */
			if (eth_skb_pad(skb)) {
				/* The packet is gone, so we must
				 * return 0 */
				ss->stats.tx_dropped += 1;
				return NETDEV_TX_OK;
			}
		}

	/* map the skb for DMA */
	len = skb_headlen(skb);
	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
		goto drop;

	idx = tx->req & tx->mask;
	tx->info[idx].skb = skb;
	dma_unmap_addr_set(&tx->info[idx], bus, bus);
	dma_unmap_len_set(&tx->info[idx], len, len);

	frag_cnt = skb_shinfo(skb)->nr_frags;
	frag_idx = 0;
	count = 0;
	rdma_count = 0;

	/* "rdma_count" is the number of RDMAs belonging to the
	 * current packet BEFORE the current send request. For
	 * non-TSO packets, this is equal to "count".
	 * For TSO packets, rdma_count needs to be reset
	 * to 0 after a segment cut.
	 *
	 * The rdma_count field of the send request is
	 * the number of RDMAs of the packet starting at
	 * that request. For TSO send requests with one or more cuts
	 * in the middle, this is the number of RDMAs starting
	 * after the last cut in the request. All previous
	 * segments before the last cut implicitly have 1 RDMA.
	 *
	 * Since the number of RDMAs is not known beforehand,
	 * it must be filled-in retroactively - after each
	 * segmentation cut or at the end of the entire packet.
	 */

	while (1) {
		/* Break the SKB or Fragment up into pieces which
		 * do not cross mgp->tx_boundary */
		low = MYRI10GE_LOWPART_TO_U32(bus);
		high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
		while (len) {
			u8 flags_next;
			int cum_len_next;

			if (unlikely(count == max_segments))
				goto abort_linearize;

			/* next tx_boundary-aligned address after 'low' */
			boundary =
			    (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
			seglen = boundary - low;
			if (seglen > len)
				seglen = len;
			flags_next = flags & ~MXGEFW_FLAGS_FIRST;
			cum_len_next = cum_len + seglen;
			if (mss) {	/* TSO */
				/* retroactively fill in the RDMA count of the
				 * request that started the current segment */
				(req - rdma_count)->rdma_count = rdma_count + 1;

				if (likely(cum_len >= 0)) {	/* payload */
					int next_is_first, chop;

					/* branchless flag arithmetic: chop and
					 * next_is_first are 0 or 1 */
					chop = (cum_len_next > mss);
					cum_len_next = cum_len_next % mss;
					next_is_first = (cum_len_next == 0);
					flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
					flags_next |= next_is_first *
					    MXGEFW_FLAGS_FIRST;
					rdma_count |= -(chop | next_is_first);
					rdma_count += chop & ~next_is_first;
				} else if (likely(cum_len_next >= 0)) {	/* header ends */
					int small;

					rdma_count = -1;
					cum_len_next = 0;
					seglen = -cum_len;
					small = (mss <= MXGEFW_SEND_SMALL_SIZE);
					flags_next = MXGEFW_FLAGS_TSO_PLD |
					    MXGEFW_FLAGS_FIRST |
					    (small * MXGEFW_FLAGS_SMALL);
				}
			}
			req->addr_high = high_swapped;
			req->addr_low = htonl(low);
			req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
			req->pad = 0;	/* complete solid 16-byte block; does this matter? */
			req->rdma_count = 1;
			req->length = htons(seglen);
			req->cksum_offset = cksum_offset;
			req->flags = flags | ((cum_len & 1) * odd_flag);

			low += seglen;
			len -= seglen;
			cum_len = cum_len_next;
			flags = flags_next;
			req++;
			count++;
			rdma_count++;
			if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
				if (unlikely(cksum_offset > seglen))
					cksum_offset -= seglen;
				else
					cksum_offset = 0;
			}
		}
		if (frag_idx == frag_cnt)
			break;

		/* map next fragment for DMA */
		frag = &skb_shinfo(skb)->frags[frag_idx];
		frag_idx++;
		len = skb_frag_size(frag);
		bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
			myri10ge_unmap_tx_dma(mgp, tx, idx);
			goto drop;
		}
		idx = (count + tx->req) & tx->mask;
		dma_unmap_addr_set(&tx->info[idx], bus, bus);
		dma_unmap_len_set(&tx->info[idx], len, len);
	}

	/* backfill the RDMA count of the final run of requests */
	(req - rdma_count)->rdma_count = rdma_count;
	if (mss)
		do {
			/* mark the tail of each TSO segment */
			req--;
			req->flags |= MXGEFW_FLAGS_TSO_LAST;
		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
					 MXGEFW_FLAGS_FIRST)));
	idx = ((count - 1) + tx->req) & tx->mask;
	tx->info[idx].last = 1;
	myri10ge_submit_req(tx, tx->req_list, count);

	/* if using multiple tx queues, make sure NIC polls the
	 * current slice */
	if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
		tx->queue_active = 1;
		put_be32(htonl(1), tx->send_go);
		mb();
		mmiowb();
	}
	tx->pkt_start++;
	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
		/* not enough room left for another worst-case packet */
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
	}
	return NETDEV_TX_OK;

abort_linearize:
	myri10ge_unmap_tx_dma(mgp, tx, idx);

	if (skb_is_gso(skb)) {
		netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
		goto drop;
	}

	if (skb_linearize(skb))
		goto drop;

	/* linearized skb now fits in one fragment; retry from the top */
	tx->linearized++;
	goto again;

drop:
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
/*
 * Software TSO fallback: segment the skb with the stack's GSO code and
 * transmit each resulting segment individually.  Used by myri10ge_xmit()
 * when a TSO packet's headers are too long for the firmware.  Always
 * returns NETDEV_TX_OK; the original skb is consumed whether its
 * segments were sent or dropped.
 */
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct sk_buff *segs, *curr;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	netdev_tx_t status;

	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
	if (IS_ERR(segs))
		goto drop;

	while (segs) {
		curr = segs;
		segs = segs->next;
		curr->next = NULL;
		status = myri10ge_xmit(curr, dev);
		if (status != 0) {
			/* Free the failed segment and every segment still
			 * queued behind it.  (The previous code freed only
			 * one trailing segment - and the wrong one, having
			 * already advanced 'segs' - leaking the detached
			 * skb and the rest of the list.) */
			dev_kfree_skb_any(curr);
			while (segs != NULL) {
				curr = segs;
				segs = segs->next;
				curr->next = NULL;
				dev_kfree_skb_any(curr);
			}
			goto drop;
		}
	}
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

drop:
	/* account the drop against the slice this skb was mapped to */
	ss = &mgp->ss[skb_get_queue_mapping(skb)];
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
						    struct rtnl_link_stats64 *stats)
{
	/* ndo_get_stats64: sum the per-slice counters into the
	 * aggregate stats structure provided by the caller. */
	const struct myri10ge_priv *mgp = netdev_priv(dev);
	const struct myri10ge_slice_netstats *s;
	int slice;

	for (slice = 0; slice < mgp->num_slices; slice++) {
		s = &mgp->ss[slice].stats;
		stats->rx_packets += s->rx_packets;
		stats->rx_bytes += s->rx_bytes;
		stats->rx_dropped += s->rx_dropped;
		stats->tx_packets += s->tx_packets;
		stats->tx_bytes += s->tx_bytes;
		stats->tx_dropped += s->tx_dropped;
	}
	return stats;
}
static void myri10ge_set_multicast_list(struct net_device *dev)
{
	/* ndo_set_rx_mode: program promiscuous mode and the firmware's
	 * multicast filter from the device's current address lists. */
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct netdev_hw_addr *mc;
	struct myri10ge_cmd cmd;
	__be32 data[2] = { 0, 0 };
	int status;

	/* can be called from atomic contexts,
	 * pass 1 to force atomicity in myri10ge_send_cmd() */
	myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);

	/* This firmware is known to not support multicast */
	if (!mgp->fw_multicast_support)
		return;

	/* Accept all multicast while the filter is being rebuilt */
	status = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
	if (status != 0) {
		netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
			   status);
		return;
	}

	if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
		/* filtering is meant to stay disabled, so we are done */
		return;
	}

	/* Drop all previously joined groups */
	status = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
				   &cmd, 1);
	if (status != 0) {
		netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
			   status);
		return;
	}

	/* Join every address on the device's multicast list */
	netdev_for_each_mc_addr(mc, dev) {
		memcpy(data, &mc->addr, ETH_ALEN);
		cmd.data0 = ntohl(data[0]);
		cmd.data1 = ntohl(data[1]);
		status = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
					   &cmd, 1);
		if (status != 0) {
			netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
				   status, mc->addr);
			return;
		}
	}

	/* Re-enable filtering now that the group list is programmed */
	status = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
	if (status != 0)
		netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
			   status);
}
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
{
	/* ndo_set_mac_address: push the new MAC to the firmware first,
	 * then mirror it into the netdev on success. */
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct sockaddr *sa = addr;
	int status;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	status = myri10ge_update_mac_address(mgp, sa->sa_data);
	if (status == 0) {
		/* change the dev structure */
		memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
		return 0;
	}
	netdev_err(dev, "changing mac address failed with %d\n", status);
	return status;
}
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
	/* ndo_change_mtu: validate the requested MTU, then apply it;
	 * a running interface is bounced so the firmware sees it. */
	struct myri10ge_priv *mgp = netdev_priv(dev);

	/* 68 is the historical minimum MTU; the upper bound keeps the
	 * full ethernet frame within what the NIC supports */
	if (new_mtu < 68 || ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU) {
		netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
		return -EINVAL;
	}
	netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
	if (!mgp->running) {
		dev->mtu = new_mtu;
		return 0;
	}
	/* if we change the mtu on an active device, we must
	 * reset the device so the firmware sees the change */
	myri10ge_close(dev);
	dev->mtu = new_mtu;
	myri10ge_open(dev);
	return 0;
}
/*
* Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
* Only do it if the bridge is a root port since we don't want to disturb
* any other device, except if forced with myri10ge_ecrc_enable > 1.
*/
static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
	struct pci_dev *bridge = mgp->pdev->bus->self;
	struct device *dev = &mgp->pdev->dev;
	int cap;
	unsigned err_cap;
	int ret;

	/* nothing to do if disabled by module parameter or there is
	 * no upstream bridge to configure */
	if (!myri10ge_ecrc_enable || !bridge)
		return;

	/* check that the bridge is a root port */
	if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
		if (myri10ge_ecrc_enable > 1) {
			struct pci_dev *prev_bridge, *old_bridge = bridge;

			/* Walk the hierarchy up to the root port
			 * where ECRC has to be enabled */
			do {
				prev_bridge = bridge;
				bridge = bridge->bus->self;
				/* prev_bridge == bridge guards against a
				 * self-referencing bus topology */
				if (!bridge || prev_bridge == bridge) {
					dev_err(dev,
						"Failed to find root port"
						" to force ECRC\n");
					return;
				}
			} while (pci_pcie_type(bridge) !=
				 PCI_EXP_TYPE_ROOT_PORT);

			dev_info(dev,
				 "Forcing ECRC on non-root port %s"
				 " (enabling on root port %s)\n",
				 pci_name(old_bridge), pci_name(bridge));
		} else {
			dev_err(dev,
				"Not enabling ECRC on non-root port %s\n",
				pci_name(bridge));
			return;
		}
	}

	/* locate the AER extended capability on the root port */
	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
	if (!cap)
		return;

	ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
	if (ret) {
		dev_err(dev, "failed reading ext-conf-space of %s\n",
			pci_name(bridge));
		dev_err(dev, "\t pci=nommconf in use? "
			"or buggy/incomplete/absent ACPI MCFG attr?\n");
		return;
	}
	/* only enable generation if the port advertises the capability */
	if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
		return;

	err_cap |= PCI_ERR_CAP_ECRC_GENE;
	pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
	dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
}
/*
* The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
* when the PCI-E Completion packets are aligned on an 8-byte
* boundary. Some PCI-E chip sets always align Completion packets; on
* the ones that do not, the alignment can be enforced by enabling
* ECRC generation (if supported).
*
* When PCI-E Completion packets are not aligned, it is actually more
* efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
*
* If the driver can neither enable ECRC nor verify that it has
* already been enabled, then it must use a firmware image which works
* around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
* should also ensure that it never gives the device a Read-DMA which is
* larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
* enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
* firmware image, and set tx_boundary to 4KB.
*/
/*
 * Probe whether this host delivers aligned PCIe completions: load the
 * aligned firmware and run the NIC's unaligned-completion DMA test.
 * On success the aligned firmware (tx_boundary 4096) is kept; on any
 * failure we fall back to the unaligned firmware with tx_boundary 2048.
 */
static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct device *dev = &pdev->dev;
	int status;

	/* assume aligned completions until proven otherwise */
	mgp->tx_boundary = 4096;
	/*
	 * Verify the max read request size was set to 4KB
	 * before trying the test with 4KB.
	 */
	status = pcie_get_readrq(pdev);
	if (status < 0) {
		dev_err(dev, "Couldn't read max read req size: %d\n", status);
		goto abort;
	}
	if (status != 4096) {
		dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
		mgp->tx_boundary = 2048;
	}
	/*
	 * load the optimized firmware (which assumes aligned PCIe
	 * completions) in order to see if it works on this host.
	 */
	set_fw_name(mgp, myri10ge_fw_aligned, false);
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0) {
		goto abort;
	}

	/*
	 * Enable ECRC if possible
	 */
	myri10ge_enable_ecrc(mgp);

	/*
	 * Run a DMA test which watches for unaligned completions and
	 * aborts on the first one seen.
	 */
	status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
	if (status == 0)
		return;		/* keep the aligned firmware */

	/* -E2BIG is the expected "unaligned completion seen" result;
	 * anything else is worth reporting */
	if (status != -E2BIG)
		dev_warn(dev, "DMA test failed: %d\n", status);
	if (status == -ENOSYS)
		dev_warn(dev, "Falling back to ethp! "
			 "Please install up to date fw\n");
abort:
	/* fall back to using the unaligned firmware */
	mgp->tx_boundary = 2048;
	set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
/*
 * Choose which firmware image to load and the matching tx_boundary.
 * Unless forced by the myri10ge_force_firmware module parameter, the
 * decision is based on the PCIe link width or on a live DMA probe.
 * A name supplied via the myri10ge_fw_name or myri10ge_fw_names[]
 * module parameters overrides the automatic choice.
 */
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
	int overridden = 0;

	if (myri10ge_force_firmware == 0) {
		int link_width;
		u16 lnk;

		pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
		/* bits 9:4 of LNKSTA hold the negotiated link width */
		link_width = (lnk >> 4) & 0x3f;

		/* Check to see if Link is less than 8 or if the
		 * upstream bridge is known to provide aligned
		 * completions */
		if (link_width < 8) {
			dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
				 link_width);
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			myri10ge_firmware_probe(mgp);
		}
	} else {
		if (myri10ge_force_firmware == 1) {
			dev_info(&mgp->pdev->dev,
				 "Assuming aligned completions (forced)\n");
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			dev_info(&mgp->pdev->dev,
				 "Assuming unaligned completions (forced)\n");
			mgp->tx_boundary = 2048;
			set_fw_name(mgp, myri10ge_fw_unaligned, false);
		}
	}

	/* sample the writable module parameter under the param lock */
	kernel_param_lock(THIS_MODULE);
	if (myri10ge_fw_name != NULL) {
		char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
		if (fw_name) {
			overridden = 1;
			/* ownership of the kstrdup'd copy passes to mgp
			 * (allocated=true, so set_fw_name will free it) */
			set_fw_name(mgp, fw_name, true);
		}
	}
	kernel_param_unlock(THIS_MODULE);

	/* a per-board name, if set, takes final precedence */
	if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
	    myri10ge_fw_names[mgp->board_number] != NULL &&
	    strlen(myri10ge_fw_names[mgp->board_number])) {
		set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
		overridden = 1;
	}
	if (overridden)
		dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
			 mgp->fw_name);
}
static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
{
	/* A SRAM parity error on the NIC can surface as a surprise
	 * link-down event at the upstream bridge.  Since the driver
	 * expects and recovers from SRAM parity errors, mask that
	 * event in the bridge's AER uncorrectable-error mask. */
	struct pci_dev *bridge = pdev->bus->self;
	u32 mask;
	int cap;

	if (!bridge)
		return;
	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
	if (!cap)
		return;
	pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
	mask |= 0x20;	/* surprise-link-down bit of PCI_ERR_UNCOR_MASK */
	pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
}
#ifdef CONFIG_PM
static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* Legacy PCI suspend hook: detach and close the interface,
	 * stop the NIC's dummy RDMAs, and put the slot to sleep. */
	struct net_device *netdev;
	struct myri10ge_priv *mgp = pci_get_drvdata(pdev);

	if (!mgp)
		return -EINVAL;
	netdev = mgp->dev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		netdev_info(netdev, "closing\n");
		rtnl_lock();
		myri10ge_close(netdev);
		rtnl_unlock();
	}
	myri10ge_dummy_rdma(mgp, 0);
	pci_save_state(pdev);
	pci_disable_device(pdev);

	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
/*
 * Legacy PCI resume hook: power the slot back up, restore config
 * space, restart the NIC's dummy RDMAs, and reopen the interface if
 * it was running at suspend time.
 */
static int myri10ge_resume(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;
	int status;
	u16 vendor;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return -EINVAL;
	netdev = mgp->dev;
	pci_set_power_state(pdev, PCI_D0);	/* zeros conf space as a side effect */
	msleep(5);		/* give card time to respond */
	pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
	if (vendor == 0xffff) {
		/* all-ones config reads mean the card is gone */
		netdev_err(mgp->dev, "device disappeared!\n");
		return -EIO;
	}

	pci_restore_state(pdev);

	status = pci_enable_device(pdev);
	if (status) {
		dev_err(&pdev->dev, "failed to enable device\n");
		return status;
	}

	pci_set_master(pdev);

	myri10ge_reset(mgp);
	myri10ge_dummy_rdma(mgp, 1);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		status = myri10ge_open(netdev);
		rtnl_unlock();
		if (status != 0)
			goto abort_with_enabled;
	}
	netif_device_attach(netdev);

	return 0;

abort_with_enabled:
	pci_disable_device(pdev);
	return -EIO;
}
#endif /* CONFIG_PM */
static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
{
	/* Read the NIC's REBOOT_STATUS word through the vendor-specific
	 * config-space window at mgp->vendor_specific_offset. */
	struct pci_dev *pdev = mgp->pdev;
	int vs = mgp->vendor_specific_offset;
	u32 val;

	/* switch the window into 32-bit read mode */
	pci_write_config_byte(pdev, vs + 0x10, 0x3);
	/* point the window at REBOOT_STATUS (0xfffffff0)... */
	pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
	/* ...and fetch the value through the data register */
	pci_read_config_dword(pdev, vs + 0x14, &val);

	return val;
}
/*
 * Watchdog helper for a single slice.  Flags a NIC reset when the
 * slice's transmit ring has pending requests but made no completion
 * progress since the previous watchdog tick (unless the stall is
 * explained by pause frames from the link partner), counts slices
 * that saw any tx/rx activity, and snapshots the counters for the
 * next tick's comparison.
 */
static void
myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
		     int *busy_slice_cnt, u32 rx_pause_cnt)
{
	struct myri10ge_priv *mgp = ss->mgp;
	int slice = ss - mgp->ss;

	/* pending tx, no completions since last tick, and no new
	 * requests either: the ring looks wedged */
	if (ss->tx.req != ss->tx.done &&
	    ss->tx.done == ss->watchdog_tx_done &&
	    ss->watchdog_tx_req != ss->watchdog_tx_done) {
		/* nic seems like it might be stuck.. */
		if (rx_pause_cnt != mgp->watchdog_pause) {
			/* pause frames explain the stall; just warn */
			if (net_ratelimit())
				netdev_warn(mgp->dev, "slice %d: TX paused, "
					    "check link partner\n", slice);
		} else {
			netdev_warn(mgp->dev,
				    "slice %d: TX stuck %d %d %d %d %d %d\n",
				    slice, ss->tx.queue_active, ss->tx.req,
				    ss->tx.done, ss->tx.pkt_start,
				    ss->tx.pkt_done,
				    (int)ntohl(mgp->ss[slice].fw_stats->
					       send_done_count));
			*reset_needed = 1;
			ss->stuck = 1;
		}
	}
	/* any tx completion or rx progress counts the slice as busy */
	if (ss->watchdog_tx_done != ss->tx.done ||
	    ss->watchdog_rx_done != ss->rx_done.cnt) {
		*busy_slice_cnt += 1;
	}
	/* remember the current state for the next watchdog pass */
	ss->watchdog_tx_done = ss->tx.done;
	ss->watchdog_tx_req = ss->tx.req;
	ss->watchdog_rx_done = ss->rx_done.cnt;
}
/*
* This watchdog is used to check whether the board has suffered
* from a parity error and needs to be recovered.
*/
/*
 * Watchdog work handler: recover the NIC either from a parity-error
 * reboot (detected via the bus-master bit having been cleared) or
 * from a stuck transmit ring, by closing the interface, reloading
 * firmware, and reopening.
 */
static void myri10ge_watchdog(struct work_struct *work)
{
	struct myri10ge_priv *mgp =
	    container_of(work, struct myri10ge_priv, watchdog_work);
	struct myri10ge_slice_state *ss;
	u32 reboot, rx_pause_cnt;
	int status, rebooted;
	int i;
	int reset_needed = 0;
	int busy_slice_cnt = 0;
	u16 cmd, vendor;

	mgp->watchdog_resets++;
	pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
	rebooted = 0;
	if ((cmd & PCI_COMMAND_MASTER) == 0) {
		/* Bus master DMA disabled?  Check to see
		 * if the card rebooted due to a parity error
		 * For now, just report it */
		reboot = myri10ge_read_reboot(mgp);
		netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
			   reboot, myri10ge_reset_recover ? "" : " not");
		if (myri10ge_reset_recover == 0)
			return;
		rtnl_lock();
		mgp->rebooted = 1;
		rebooted = 1;
		myri10ge_close(mgp->dev);
		myri10ge_reset_recover--;
		mgp->rebooted = 0;
		/*
		 * A rebooted nic will come back with config space as
		 * it was after power was applied to PCIe bus.
		 * Attempt to restore config space which was saved
		 * when the driver was loaded, or the last time the
		 * nic was resumed from power saving mode.
		 */
		pci_restore_state(mgp->pdev);

		/* save state again for accounting reasons */
		pci_save_state(mgp->pdev);

	} else {
		/* if we get back -1's from our slot, perhaps somebody
		 * powered off our card.  Don't try to reset it in
		 * this case */
		if (cmd == 0xffff) {
			pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
			if (vendor == 0xffff) {
				netdev_err(mgp->dev, "device disappeared!\n");
				return;
			}
		}
		/* Perhaps it is a software error.  See if stuck slice
		 * has recovered, reset if not */
		rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
		for (i = 0; i < mgp->num_slices; i++) {
			/* fix: index by i; the old code re-examined
			 * slice 0 on every iteration, so stuck slices
			 * other than 0 were never re-checked */
			ss = &mgp->ss[i];
			if (ss->stuck) {
				myri10ge_check_slice(ss, &reset_needed,
						     &busy_slice_cnt,
						     rx_pause_cnt);
				ss->stuck = 0;
			}
		}
		if (!reset_needed) {
			netdev_dbg(mgp->dev, "not resetting\n");
			return;
		}

		netdev_err(mgp->dev, "device timeout, resetting\n");
	}

	/* the reboot path above already took rtnl and closed the dev */
	if (!rebooted) {
		rtnl_lock();
		myri10ge_close(mgp->dev);
	}
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0)
		netdev_err(mgp->dev, "failed to load firmware\n");
	else
		myri10ge_open(mgp->dev);
	rtnl_unlock();
}
/*
* We use our own timer routine rather than relying upon
* netdev->tx_timeout because we have a very large hardware transmit
* queue. Due to the large queue, the netdev->tx_timeout function
* cannot detect a NIC with a parity error in a timely fashion if the
* NIC is lightly loaded.
*/
static void myri10ge_watchdog_timer(unsigned long arg)
{
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_state *ss;
	int i, reset_needed, busy_slice_cnt;
	u32 rx_pause_cnt;
	u16 cmd;

	mgp = (struct myri10ge_priv *)arg;

	rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
	busy_slice_cnt = 0;
	for (i = 0, reset_needed = 0;
	     i < mgp->num_slices && reset_needed == 0; ++i) {

		ss = &mgp->ss[i];
		if (ss->rx_small.watchdog_needed) {
			/* retry the small-buffer rx refill that failed
			 * earlier; clear the flag once the ring is
			 * sufficiently full again */
			myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
						mgp->small_bytes + MXGEFW_PAD,
						1);
			if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_small.watchdog_needed = 0;
		}
		if (ss->rx_big.watchdog_needed) {
			/* same recovery for the big-buffer rx ring */
			myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
						mgp->big_bytes, 1);
			if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_big.watchdog_needed = 0;
		}
		myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
				     rx_pause_cnt);
	}
	/* if we've sent or received no traffic, poll the NIC to
	 * ensure it is still there.  Otherwise, we risk not noticing
	 * an error in a timely fashion */
	if (busy_slice_cnt == 0) {
		pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
		if ((cmd & PCI_COMMAND_MASTER) == 0) {
			reset_needed = 1;
		}
	}
	mgp->watchdog_pause = rx_pause_cnt;

	if (reset_needed) {
		/* heavy recovery happens in process context */
		schedule_work(&mgp->watchdog_work);
	} else {
		/* rearm timer */
		mod_timer(&mgp->watchdog_timer,
			  jiffies + myri10ge_watchdog_timeout * HZ);
	}
}
static void myri10ge_free_slices(struct myri10ge_priv *mgp)
{
	/* Tear down everything myri10ge_alloc_slices() created: per-slice
	 * DMA'd completion rings, firmware stats blocks, NAPI contexts,
	 * and finally the slice array itself. */
	struct pci_dev *pdev = mgp->pdev;
	struct myri10ge_slice_state *ss;
	size_t sz;
	int slice;

	if (mgp->ss == NULL)
		return;

	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];

		if (ss->rx_done.entry != NULL) {
			sz = mgp->max_intr_slots *
			    sizeof(*ss->rx_done.entry);
			dma_free_coherent(&pdev->dev, sz,
					  ss->rx_done.entry, ss->rx_done.bus);
			ss->rx_done.entry = NULL;
		}
		if (ss->fw_stats != NULL) {
			sz = sizeof(*ss->fw_stats);
			dma_free_coherent(&pdev->dev, sz,
					  ss->fw_stats, ss->fw_stats_bus);
			ss->fw_stats = NULL;
		}
		napi_hash_del(&ss->napi);
		netif_napi_del(&ss->napi);
	}
	/* Wait till napi structs are no longer used, and then free ss. */
	synchronize_rcu();
	kfree(mgp->ss);
	mgp->ss = NULL;
}
static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
	/* Allocate the per-slice state array plus each slice's DMA'd
	 * completion ring and firmware stats block, and register NAPI.
	 * On any failure, everything allocated so far is torn down. */
	struct pci_dev *pdev = mgp->pdev;
	struct myri10ge_slice_state *ss;
	size_t sz;
	int slice;

	sz = sizeof(*mgp->ss) * mgp->num_slices;
	mgp->ss = kzalloc(sz, GFP_KERNEL);
	if (mgp->ss == NULL)
		return -ENOMEM;

	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];

		sz = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
		ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, sz,
							&ss->rx_done.bus,
							GFP_KERNEL);
		if (ss->rx_done.entry == NULL)
			goto abort;

		sz = sizeof(*ss->fw_stats);
		ss->fw_stats = dma_alloc_coherent(&pdev->dev, sz,
						  &ss->fw_stats_bus,
						  GFP_KERNEL);
		if (ss->fw_stats == NULL)
			goto abort;

		ss->mgp = mgp;
		ss->dev = mgp->dev;
		netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
			       myri10ge_napi_weight);
		napi_hash_add(&ss->napi);
	}
	return 0;

abort:
	/* free_slices skips anything still NULL, so a partial
	 * allocation is cleaned up safely */
	myri10ge_free_slices(mgp);
	return -ENOMEM;
}
/*
* This function determines the number of slices supported.
 * The number of slices is the minimum of the number of CPUs,
 * the number of MSI-X IRQs supported, and the number of slices
 * supported by the firmware.
*/
static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct pci_dev *pdev = mgp->pdev;
	char *old_fw;
	bool old_allocated;
	int i, status, ncpus;

	mgp->num_slices = 1;
	ncpus = netif_get_num_default_rss_queues();

	/* stay single-slice if RSS is disabled by module parameter,
	 * MSI-X is absent, or only one CPU is available */
	if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
	    (myri10ge_max_slices == -1 && ncpus < 2))
		return;

	/* try to load the slice aware rss firmware */
	old_fw = mgp->fw_name;
	old_allocated = mgp->fw_name_allocated;
	/* don't free old_fw if we override it. */
	mgp->fw_name_allocated = false;

	if (myri10ge_fw_name != NULL) {
		dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
			 myri10ge_fw_name);
		set_fw_name(mgp, myri10ge_fw_name, false);
	} else if (old_fw == myri10ge_fw_aligned)
		set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
	else
		set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
	status = myri10ge_load_firmware(mgp, 0);
	if (status != 0) {
		dev_info(&pdev->dev, "Rss firmware not found\n");
		if (old_allocated)
			kfree(old_fw);
		return;
	}

	/* hit the board with a reset to ensure it is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		goto abort_with_fw;
	}

	/* the reset command returns the intr queue size in data0 */
	mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);

	/* tell it the size of the interrupt queues */
	cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
		goto abort_with_fw;
	}

	/* ask the maximum number of slices it supports */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
	if (status != 0)
		goto abort_with_fw;
	else
		mgp->num_slices = cmd.data0;

	/* Only allow multiple slices if MSI-X is usable */
	if (!myri10ge_msi) {
		goto abort_with_fw;
	}

	/* if the admin did not specify a limit to how many
	 * slices we should use, cap it automatically to the
	 * number of CPUs currently online */
	if (myri10ge_max_slices == -1)
		myri10ge_max_slices = ncpus;

	if (mgp->num_slices > myri10ge_max_slices)
		mgp->num_slices = myri10ge_max_slices;

	/* Now try to allocate as many MSI-X vectors as we have
	 * slices. We give up on MSI-X if we can only get a single
	 * vector. */

	mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
				    GFP_KERNEL);
	if (mgp->msix_vectors == NULL)
		goto no_msix;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->msix_vectors[i].entry = i;
	}

	/* shrink the slice count (keeping it a power of two) until
	 * the requested number of vectors can be granted */
	while (mgp->num_slices > 1) {
		mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
		if (mgp->num_slices == 1)
			goto no_msix;
		status = pci_enable_msix_range(pdev,
					       mgp->msix_vectors,
					       mgp->num_slices,
					       mgp->num_slices);
		if (status < 0)
			goto no_msix;

		/* we were only probing; release the vectors again */
		pci_disable_msix(pdev);

		if (status == mgp->num_slices) {
			if (old_allocated)
				kfree(old_fw);
			return;
		} else {
			mgp->num_slices = status;
		}
	}

no_msix:
	if (mgp->msix_vectors != NULL) {
		kfree(mgp->msix_vectors);
		mgp->msix_vectors = NULL;
	}

abort_with_fw:
	/* fall back to single-slice mode and the original firmware */
	mgp->num_slices = 1;
	set_fw_name(mgp, old_fw, old_allocated);
	myri10ge_load_firmware(mgp, 0);
}
/* net_device_ops for the myri10ge interface; installed on the netdev
 * by the probe routine. */
static const struct net_device_ops myri10ge_netdev_ops = {
	.ndo_open = myri10ge_open,
	.ndo_stop = myri10ge_close,
	.ndo_start_xmit = myri10ge_xmit,
	.ndo_get_stats64 = myri10ge_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = myri10ge_change_mtu,
	.ndo_set_rx_mode = myri10ge_set_multicast_list,
	.ndo_set_mac_address = myri10ge_set_mac_address,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = myri10ge_busy_poll,
#endif
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
int i;
int status = -ENXIO;
int dac_enabled;
unsigned hdr_offset, ss_offset;
static int board_number;
netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
if (netdev == NULL)
return -ENOMEM;
SET_NETDEV_DEV(netdev, &pdev->dev);
mgp = netdev_priv(netdev);
mgp->dev = netdev;
mgp->pdev = pdev;
mgp->pause = myri10ge_flow_control;
mgp->intr_coal_delay = myri10ge_intr_coal_delay;
mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
mgp->board_number = board_number;
init_waitqueue_head(&mgp->down_wq);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "pci_enable_device call failed\n");
status = -ENODEV;
goto abort_with_netdev;
}
/* Find the vendor-specific cap so we can check
* the reboot register later on */
mgp->vendor_specific_offset
= pci_find_capability(pdev, PCI_CAP_ID_VNDR);
/* Set our max read request to 4KB */
status = pcie_set_readrq(pdev, 4096);
if (status != 0) {
dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
status);
goto abort_with_enabled;
}
myri10ge_mask_surprise_down(pdev);
pci_set_master(pdev);
dac_enabled = 1;
status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (status != 0) {
dac_enabled = 0;
dev_err(&pdev->dev,
"64-bit pci address mask was refused, "
"trying 32-bit\n");
status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}
if (status != 0) {
dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
goto abort_with_enabled;
}
(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
if (!mgp->cmd) {
status = -ENOMEM;
goto abort_with_enabled;
}
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
if (mgp->sram == NULL) {
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
mgp->board_span, mgp->iomem_base);
status = -ENXIO;
goto abort_with_mtrr;
}
hdr_offset =
swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
if (mgp->sram_size > mgp->board_span ||
mgp->sram_size <= MYRI10GE_FW_OFFSET) {
dev_err(&pdev->dev,
"invalid sram_size %dB or board span %ldB\n",
mgp->sram_size, mgp->board_span);
goto abort_with_ioremap;
}
memcpy_fromio(mgp->eeprom_strings,
mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
status = myri10ge_read_mac_addr(mgp);
if (status)
goto abort_with_ioremap;
for (i = 0; i < ETH_ALEN; i++)
netdev->dev_addr[i] = mgp->mac_addr[i];
myri10ge_select_firmware(mgp);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
dev_err(&pdev->dev, "failed to load firmware\n");
goto abort_with_ioremap;
}
myri10ge_probe_slices(mgp);
status = myri10ge_alloc_slices(mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed to alloc slice state\n");
goto abort_with_firmware;
}
netif_set_real_num_tx_queues(netdev, mgp->num_slices);
netif_set_real_num_rx_queues(netdev, mgp->num_slices);
status = myri10ge_reset(mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed reset\n");
goto abort_with_slices;
}
#ifdef CONFIG_MYRI10GE_DCA
myri10ge_setup_dca(mgp);
#endif
pci_set_drvdata(pdev, mgp);
if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
myri10ge_initial_mtu = 68;
netdev->netdev_ops = &myri10ge_netdev_ops;
netdev->mtu = myri10ge_initial_mtu;
netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features;
if (dac_enabled)
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= mgp->features;
if (mgp->fw_ver_tiny < 37)
netdev->vlan_features &= ~NETIF_F_TSO6;
if (mgp->fw_ver_tiny < 32)
netdev->vlan_features &= ~NETIF_F_TSO;
/* make sure we can get an irq, and that MSI can be
* setup (if available). */
status = myri10ge_request_irq(mgp);
if (status != 0)
goto abort_with_firmware;
myri10ge_free_irq(mgp);
/* Save configuration space to be restored if the
* nic resets due to a parity error */
pci_save_state(pdev);
/* Setup the watchdog timer */
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
(unsigned long)mgp);
netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
goto abort_with_state;
}
if (mgp->msix_enabled)
dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
else
dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->msi_enabled ? "MSI" : "xPIC",
pdev->irq, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
board_number++;
return 0;
abort_with_state:
pci_restore_state(pdev);
abort_with_slices:
myri10ge_free_slices(mgp);
abort_with_firmware:
myri10ge_dummy_rdma(mgp, 0);
abort_with_ioremap:
if (mgp->mac_addr_string != NULL)
dev_err(&pdev->dev,
"myri10ge_probe() failed: MAC=%s, SN=%ld\n",
mgp->mac_addr_string, mgp->serial_number);
iounmap(mgp->sram);
abort_with_mtrr:
arch_phys_wc_del(mgp->wc_cookie);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
abort_with_enabled:
pci_disable_device(pdev);
abort_with_netdev:
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
return status;
}
/*
* myri10ge_remove
*
* Does what is necessary to shutdown one Myrinet device. Called
* once for each Myrinet card by the kernel when a module is
* unloaded.
*/
/*
 * Tear down one adapter: stop the watchdog worker, unregister the
 * netdev, then release resources in the reverse order of probe.
 */
static void myri10ge_remove(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)		/* nothing was bound to this slot */
		return;

	/* No watchdog work may run once the netdev starts going away. */
	cancel_work_sync(&mgp->watchdog_work);
	netdev = mgp->dev;
	unregister_netdev(netdev);

#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_teardown_dca(mgp);
#endif
	myri10ge_dummy_rdma(mgp, 0);

	/* avoid a memory leak */
	pci_restore_state(pdev);

	iounmap(mgp->sram);
	arch_phys_wc_del(mgp->wc_cookie);
	myri10ge_free_slices(mgp);
	kfree(mgp->msix_vectors);
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E	0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9	0x0009

/* PCI IDs serviced by this driver: the two Myricom Z8E variants. */
static const struct pci_device_id myri10ge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
	{PCI_DEVICE
	 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
	{0},
};

MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);

static struct pci_driver myri10ge_driver = {
	.name = "myri10ge",
	.probe = myri10ge_probe,
	.remove = myri10ge_remove,
	.id_table = myri10ge_pci_tbl,
#ifdef CONFIG_PM
	/* Legacy PM callbacks; compiled in only with power management. */
	.suspend = myri10ge_suspend,
	.resume = myri10ge_resume,
#endif
};
#ifdef CONFIG_MYRI10GE_DCA
/*
 * DCA availability notifier: fan the event out to every device bound
 * to this driver via myri10ge_notify_dca_device().
 */
static int
myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
{
	int err = driver_for_each_device(&myri10ge_driver.driver,
					 NULL, &event,
					 myri10ge_notify_dca_device);

	if (err)
		return NOTIFY_BAD;
	return NOTIFY_DONE;
}

static struct notifier_block myri10ge_dca_notifier = {
	.notifier_call = myri10ge_notify_dca,
	.next = NULL,
	.priority = 0,
};
#endif				/* CONFIG_MYRI10GE_DCA */
/*
 * Module init: sanity-check module parameters, hook up the optional
 * DCA notifier, and register the PCI driver.
 */
static __init int myri10ge_init_module(void)
{
	pr_info("Version %s\n", MYRI10GE_VERSION_STR);

	if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
		/* Message typo fixed: "rssh" -> "rss". */
		pr_err("Illegal rss hash type %d, defaulting to source port\n",
		       myri10ge_rss_hash);
		myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
	}
#ifdef CONFIG_MYRI10GE_DCA
	dca_register_notify(&myri10ge_dca_notifier);
#endif
	/* Clamp the slice count to what the driver supports. */
	if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
		myri10ge_max_slices = MYRI10GE_MAX_SLICES;

	return pci_register_driver(&myri10ge_driver);
}

module_init(myri10ge_init_module);
/* Module exit: mirror image of myri10ge_init_module(). */
static __exit void myri10ge_cleanup_module(void)
{
#ifdef CONFIG_MYRI10GE_DCA
	dca_unregister_notify(&myri10ge_dca_notifier);
#endif
	pci_unregister_driver(&myri10ge_driver);
}

module_exit(myri10ge_cleanup_module);
| gpl-2.0 |
Dinjesk/android_kernel_oneplus_msm8996 | drivers/watchdog/tegra_wdt.c | 525 | 7859 | /*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
/* minimum and maximum watchdog trigger timeout, in seconds */
#define MIN_WDT_TIMEOUT			1
#define MAX_WDT_TIMEOUT			255

/*
 * Base of the WDT registers, from the timer base address. There are
 * actually 5 watchdogs that can be configured (by pairing with an available
 * timer), at bases 0x100 + (WDT ID) * 0x20, where WDT ID is 0 through 4.
 * This driver only configures the first watchdog (WDT ID 0).
 */
#define WDT_BASE			0x100
#define WDT_ID				0

/*
 * Register base of the timer that's selected for pairing with the watchdog.
 * This driver arbitrarily uses timer 5, which is currently unused by
 * other drivers (in particular, the Tegra clocksource driver). If this
 * needs to change, take care that the new timer is not used by the
 * clocksource driver.
 */
#define WDT_TIMER_BASE			0x60
#define WDT_TIMER_ID			5

/* WDT registers */
#define WDT_CFG				0x0
#define WDT_CFG_PERIOD_SHIFT		4
#define WDT_CFG_PERIOD_MASK		0xff
#define WDT_CFG_INT_EN			(1 << 12)
#define WDT_CFG_PMC2CAR_RST_EN		(1 << 15)
#define WDT_STS				0x4
#define WDT_STS_COUNT_SHIFT		4
#define WDT_STS_COUNT_MASK		0xff
#define WDT_STS_EXP_SHIFT		12
#define WDT_STS_EXP_MASK		0x3
#define WDT_CMD				0x8
#define WDT_CMD_START_COUNTER		(1 << 0)
#define WDT_CMD_DISABLE_COUNTER		(1 << 1)
#define WDT_UNLOCK			(0xc)
#define WDT_UNLOCK_PATTERN		(0xc45a << 0)

/* Timer registers */
#define TIMER_PTV			0x0
#define TIMER_EN			(1 << 31)
#define TIMER_PERIODIC			(1 << 30)

/* Per-device state: the watchdog core handle and both register windows. */
struct tegra_wdt {
	struct watchdog_device wdd;	/* nested watchdog core device */
	void __iomem *wdt_regs;		/* WDT block (base + WDT_BASE) */
	void __iomem *tmr_regs;		/* paired timer (base + WDT_TIMER_BASE) */
};
/* Default timeout, in seconds, used when "heartbeat" is not given. */
#define WDT_HEARTBEAT 120

static int heartbeat = WDT_HEARTBEAT;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat,
	"Watchdog heartbeats in seconds. (default = "
	__MODULE_STRING(WDT_HEARTBEAT) ")");

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
	"Watchdog cannot be stopped once started (default="
	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/* Program the paired timer, then configure and arm the WDT counter. */
static int tegra_wdt_start(struct watchdog_device *wdd)
{
	struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
	u32 val;

	/*
	 * This thing has a fixed 1MHz clock. Normally, we would set the
	 * period to 1 second by writing 1000000ul, but the watchdog system
	 * reset actually occurs on the 4th expiration of this counter,
	 * so we set the period to 1/4 of this amount.
	 */
	val = 1000000ul / 4;
	val |= (TIMER_EN | TIMER_PERIODIC);
	writel(val, wdt->tmr_regs + TIMER_PTV);

	/*
	 * Set number of periods and start counter.
	 *
	 * Interrupt handler is not required for user space
	 * WDT accesses, since the caller is responsible to ping the
	 * WDT to reset the counter before expiration, through ioctls.
	 */
	val = WDT_TIMER_ID |
	      (wdd->timeout << WDT_CFG_PERIOD_SHIFT) |
	      WDT_CFG_PMC2CAR_RST_EN;
	writel(val, wdt->wdt_regs + WDT_CFG);

	writel(WDT_CMD_START_COUNTER, wdt->wdt_regs + WDT_CMD);

	return 0;
}
/* Write the unlock pattern, disable the counter, stop the timer. */
static int tegra_wdt_stop(struct watchdog_device *wdd)
{
	struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);

	writel(WDT_UNLOCK_PATTERN, wdt->wdt_regs + WDT_UNLOCK);
	writel(WDT_CMD_DISABLE_COUNTER, wdt->wdt_regs + WDT_CMD);
	writel(0, wdt->tmr_regs + TIMER_PTV);

	return 0;
}
/* Keepalive: restarting the counter resets the countdown. */
static int tegra_wdt_ping(struct watchdog_device *wdd)
{
	struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);

	writel(WDT_CMD_START_COUNTER, wdt->wdt_regs + WDT_CMD);

	return 0;
}
/* Record a new timeout; restart the hardware if it is already armed. */
static int tegra_wdt_set_timeout(struct watchdog_device *wdd,
				 unsigned int timeout)
{
	wdd->timeout = timeout;

	/*
	 * A running watchdog must be reprogrammed for the new period to
	 * take effect; otherwise the value is picked up on next start.
	 */
	return watchdog_active(wdd) ? tegra_wdt_start(wdd) : 0;
}
/* Estimate seconds remaining until the 4th expiration triggers reset. */
static unsigned int tegra_wdt_get_timeleft(struct watchdog_device *wdd)
{
	struct tegra_wdt *wdt = watchdog_get_drvdata(wdd);
	u32 val;
	int count;
	int exp;

	val = readl(wdt->wdt_regs + WDT_STS);

	/* Current countdown (from timeout) */
	count = (val >> WDT_STS_COUNT_SHIFT) & WDT_STS_COUNT_MASK;

	/* Number of expirations (we are waiting for the 4th expiration) */
	exp = (val >> WDT_STS_EXP_SHIFT) & WDT_STS_EXP_MASK;

	/*
	 * The entire thing is divided by 4 because we are ticking down 4 times
	 * faster due to needing to wait for the 4th expiration.
	 */
	return (((3 - exp) * wdd->timeout) + count) / 4;
}
static const struct watchdog_info tegra_wdt_info = {
.options = WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
.firmware_version = 0,
.identity = "Tegra Watchdog",
};
static struct watchdog_ops tegra_wdt_ops = {
.owner = THIS_MODULE,
.start = tegra_wdt_start,
.stop = tegra_wdt_stop,
.ping = tegra_wdt_ping,
.set_timeout = tegra_wdt_set_timeout,
.get_timeleft = tegra_wdt_get_timeleft,
};
/*
 * Probe: map the timer block, populate the nested watchdog_device and
 * register it with the watchdog core.
 */
static int tegra_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;
	struct tegra_wdt *wdt;
	struct resource *res;
	void __iomem *regs;
	int ret;

	/* This is the timer base. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/*
	 * Allocate our watchdog driver data, which has the
	 * struct watchdog_device nested within it.
	 */
	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt)
		return -ENOMEM;

	/* Initialize struct tegra_wdt. */
	wdt->wdt_regs = regs + WDT_BASE;
	wdt->tmr_regs = regs + WDT_TIMER_BASE;

	/* Initialize struct watchdog_device. */
	wdd = &wdt->wdd;
	wdd->timeout = heartbeat;
	wdd->info = &tegra_wdt_info;
	wdd->ops = &tegra_wdt_ops;
	wdd->min_timeout = MIN_WDT_TIMEOUT;
	wdd->max_timeout = MAX_WDT_TIMEOUT;

	watchdog_set_drvdata(wdd, wdt);
	watchdog_set_nowayout(wdd, nowayout);

	ret = watchdog_register_device(wdd);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register watchdog device\n");
		return ret;
	}

	platform_set_drvdata(pdev, wdt);

	dev_info(&pdev->dev,
		 "initialized (heartbeat = %d sec, nowayout = %d)\n",
		 heartbeat, nowayout);

	return 0;
}
/* Unbind: silence the hardware, then drop the watchdog registration. */
static int tegra_wdt_remove(struct platform_device *pdev)
{
	struct tegra_wdt *wdt = platform_get_drvdata(pdev);

	tegra_wdt_stop(&wdt->wdd);
	watchdog_unregister_device(&wdt->wdd);

	dev_info(&pdev->dev, "removed wdt\n");

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* System sleep: stop the watchdog if it is currently armed. */
static int tegra_wdt_runtime_suspend(struct device *dev)
{
	struct tegra_wdt *wdt = dev_get_drvdata(dev);

	if (watchdog_active(&wdt->wdd))
		tegra_wdt_stop(&wdt->wdd);

	return 0;
}

/* System resume: re-arm the watchdog if it was active before sleep. */
static int tegra_wdt_runtime_resume(struct device *dev)
{
	struct tegra_wdt *wdt = dev_get_drvdata(dev);

	if (watchdog_active(&wdt->wdd))
		tegra_wdt_start(&wdt->wdd);

	return 0;
}
#endif
/* The watchdog lives inside the tegra30 timer block. */
static const struct of_device_id tegra_wdt_of_match[] = {
	{ .compatible = "nvidia,tegra30-timer", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_wdt_of_match);

static const struct dev_pm_ops tegra_wdt_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_wdt_runtime_suspend,
				tegra_wdt_runtime_resume)
};

static struct platform_driver tegra_wdt_driver = {
	.probe		= tegra_wdt_probe,
	.remove		= tegra_wdt_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "tegra-wdt",
		.pm	= &tegra_wdt_pm_ops,
		.of_match_table = tegra_wdt_of_match,
	},
};
module_platform_driver(tegra_wdt_driver);

MODULE_AUTHOR("NVIDIA Corporation");
MODULE_DESCRIPTION("Tegra Watchdog Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
someone755/android_kernel_sony_msm8974 | drivers/pinctrl/pinctrl-tegra.c | 1293 | 12769 | /*
* Driver for the NVIDIA Tegra pinmux
*
* Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
*
* Derived from code:
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2010 NVIDIA Corporation
* Copyright (C) 2009-2011 ST-Ericsson AB
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <mach/pinconf-tegra.h>
#include "pinctrl-tegra.h"
#define DRIVER_NAME "tegra-pinmux-disabled"

/* Driver state: the per-SoC description plus mapped register banks. */
struct tegra_pmx {
	struct device *dev;
	struct pinctrl_dev *pctl;	/* handle from pinctrl_register() */
	const struct tegra_pinctrl_soc_data *soc;	/* static SoC tables */

	int nbanks;			/* number of MEM resources found */
	void __iomem **regs;		/* one ioremap per bank */
};
/* Read a pinmux register: bank selects the mapping, reg is the offset. */
static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
{
	return readl(pmx->regs[bank] + reg);
}

/* Write a pinmux register; same addressing scheme as pmx_readl(). */
static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
{
	writel(val, pmx->regs[bank] + reg);
}
static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->ngroups;
}
static const char *tegra_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->groups[group].name;
}
static int tegra_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group,
const unsigned **pins,
unsigned *num_pins)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
*pins = pmx->soc->groups[group].pins;
*num_pins = pmx->soc->groups[group].npins;
return 0;
}
/*
 * Debugfs per-pin hook: every pin is owned by this driver, so just
 * print the driver name. seq_puts() instead of seq_printf() since the
 * string is constant (no format arguments).
 */
static void tegra_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
				       struct seq_file *s,
				       unsigned offset)
{
	seq_puts(s, " " DRIVER_NAME);
}
static struct pinctrl_ops tegra_pinctrl_ops = {
.get_groups_count = tegra_pinctrl_get_groups_count,
.get_group_name = tegra_pinctrl_get_group_name,
.get_group_pins = tegra_pinctrl_get_group_pins,
.pin_dbg_show = tegra_pinctrl_pin_dbg_show,
};
static int tegra_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->nfunctions;
}
static const char *tegra_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
unsigned function)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->functions[function].name;
}
static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
unsigned function,
const char * const **groups,
unsigned * const num_groups)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
*groups = pmx->soc->functions[function].groups;
*num_groups = pmx->soc->functions[function].ngroups;
return 0;
}
/*
 * Select @function on @group by programming the 2-bit mux field.
 * Returns -EINVAL if the group has no mux register or cannot carry
 * the requested function.
 */
static int tegra_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
				unsigned group)
{
	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
	const struct tegra_pingroup *g;
	int i;
	u32 val;

	g = &pmx->soc->groups[group];

	if (g->mux_reg < 0)	/* group is not muxable */
		return -EINVAL;

	/* Find which mux position of this group carries the function. */
	for (i = 0; i < ARRAY_SIZE(g->funcs); i++) {
		if (g->funcs[i] == function)
			break;
	}
	if (i == ARRAY_SIZE(g->funcs))
		return -EINVAL;

	/* Read-modify-write the 2-bit mux field. */
	val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
	val &= ~(0x3 << g->mux_bit);
	val |= i << g->mux_bit;
	pmx_writel(pmx, val, g->mux_bank, g->mux_reg);

	return 0;
}
/*
 * Revert @group to its safe function (func_safe) when the mux is
 * released. No-op for groups without a mux register.
 */
static void tegra_pinctrl_disable(struct pinctrl_dev *pctldev,
				  unsigned function, unsigned group)
{
	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
	const struct tegra_pingroup *g;
	u32 val;

	g = &pmx->soc->groups[group];

	if (g->mux_reg < 0)
		return;

	/* Read-modify-write the safe function into the mux field. */
	val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
	val &= ~(0x3 << g->mux_bit);
	val |= g->func_safe << g->mux_bit;
	pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
}
static struct pinmux_ops tegra_pinmux_ops = {
.get_functions_count = tegra_pinctrl_get_funcs_count,
.get_function_name = tegra_pinctrl_get_func_name,
.get_function_groups = tegra_pinctrl_get_func_groups,
.enable = tegra_pinctrl_enable,
.disable = tegra_pinctrl_disable,
};
/*
 * Map a (group, param) pair onto its register bitfield:
 * *bank/*reg locate the register, *bit/*width the field within it.
 * Returns -ENOTSUPP for an unknown param, or when the group's SoC data
 * marks the register as absent (negative reg value).
 */
static int tegra_pinconf_reg(struct tegra_pmx *pmx,
			     const struct tegra_pingroup *g,
			     enum tegra_pinconf_param param,
			     s8 *bank, s16 *reg, s8 *bit, s8 *width)
{
	switch (param) {
	case TEGRA_PINCONF_PARAM_PULL:
		*bank = g->pupd_bank;
		*reg = g->pupd_reg;
		*bit = g->pupd_bit;
		*width = 2;	/* none/down/up */
		break;
	case TEGRA_PINCONF_PARAM_TRISTATE:
		*bank = g->tri_bank;
		*reg = g->tri_reg;
		*bit = g->tri_bit;
		*width = 1;
		break;
	case TEGRA_PINCONF_PARAM_ENABLE_INPUT:
		*bank = g->einput_bank;
		*reg = g->einput_reg;
		*bit = g->einput_bit;
		*width = 1;
		break;
	case TEGRA_PINCONF_PARAM_OPEN_DRAIN:
		*bank = g->odrain_bank;
		*reg = g->odrain_reg;
		*bit = g->odrain_bit;
		*width = 1;
		break;
	case TEGRA_PINCONF_PARAM_LOCK:
		*bank = g->lock_bank;
		*reg = g->lock_reg;
		*bit = g->lock_bit;
		*width = 1;
		break;
	case TEGRA_PINCONF_PARAM_IORESET:
		*bank = g->ioreset_bank;
		*reg = g->ioreset_reg;
		*bit = g->ioreset_bit;
		*width = 1;
		break;
	/* The remaining params all live in the group's drive register. */
	case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE:
		*bank = g->drv_bank;
		*reg = g->drv_reg;
		*bit = g->hsm_bit;
		*width = 1;
		break;
	case TEGRA_PINCONF_PARAM_SCHMITT:
		*bank = g->drv_bank;
		*reg = g->drv_reg;
		*bit = g->schmitt_bit;
		*width = 1;
		break;
	case TEGRA_PINCONF_PARAM_LOW_POWER_MODE:
		*bank = g->drv_bank;
		*reg = g->drv_reg;
		*bit = g->lpmd_bit;
		*width = 1;
		break;
	case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
		*bank = g->drv_bank;
		*reg = g->drv_reg;
		*bit = g->drvdn_bit;
		*width = g->drvdn_width;
		break;
	case TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH:
		*bank = g->drv_bank;
		*reg = g->drv_reg;
		*bit = g->drvup_bit;
		*width = g->drvup_width;
		break;
	case TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING:
		*bank = g->drv_bank;
		*reg = g->drv_reg;
		*bit = g->slwf_bit;
		*width = g->slwf_width;
		break;
	case TEGRA_PINCONF_PARAM_SLEW_RATE_RISING:
		*bank = g->drv_bank;
		*reg = g->drv_reg;
		*bit = g->slwr_bit;
		*width = g->slwr_width;
		break;
	default:
		dev_err(pmx->dev, "Invalid config param %04x\n", param);
		return -ENOTSUPP;
	}

	if (*reg < 0) {
		dev_err(pmx->dev,
			"Config param %04x not supported on group %s\n",
			param, g->name);
		return -ENOTSUPP;
	}

	return 0;
}
/* Per-pin config is not supported; only group-level config is. */
static int tegra_pinconf_get(struct pinctrl_dev *pctldev,
			     unsigned pin, unsigned long *config)
{
	return -ENOTSUPP;
}
/* Per-pin config is not supported; only group-level config is. */
static int tegra_pinconf_set(struct pinctrl_dev *pctldev,
			     unsigned pin, unsigned long config)
{
	return -ENOTSUPP;
}
/*
 * Read one config parameter for @group: locate its bitfield via
 * tegra_pinconf_reg(), extract it and pack (param, value) into *config.
 */
static int tegra_pinconf_group_get(struct pinctrl_dev *pctldev,
				   unsigned group, unsigned long *config)
{
	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
	enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(*config);
	u16 arg;
	const struct tegra_pingroup *g;
	int ret;
	s8 bank, bit, width;
	s16 reg;
	u32 val, mask;

	g = &pmx->soc->groups[group];

	ret = tegra_pinconf_reg(pmx, g, param, &bank, &reg, &bit, &width);
	if (ret < 0)
		return ret;

	/* Extract the field located by tegra_pinconf_reg(). */
	val = pmx_readl(pmx, bank, reg);
	mask = (1 << width) - 1;
	arg = (val >> bit) & mask;

	*config = TEGRA_PINCONF_PACK(param, arg);

	return 0;
}
/*
 * Write one config parameter for @group. The value is validated
 * against the field width, and the LOCK bit is refused once set.
 */
static int tegra_pinconf_group_set(struct pinctrl_dev *pctldev,
				   unsigned group, unsigned long config)
{
	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
	enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(config);
	u16 arg = TEGRA_PINCONF_UNPACK_ARG(config);
	const struct tegra_pingroup *g;
	int ret;
	s8 bank, bit, width;
	s16 reg;
	u32 val, mask;

	g = &pmx->soc->groups[group];

	ret = tegra_pinconf_reg(pmx, g, param, &bank, &reg, &bit, &width);
	if (ret < 0)
		return ret;

	val = pmx_readl(pmx, bank, reg);

	/* LOCK can't be cleared */
	if (param == TEGRA_PINCONF_PARAM_LOCK) {
		if ((val & BIT(bit)) && !arg)
			return -EINVAL;
	}

	/* Special-case Boolean values; allow any non-zero as true */
	if (width == 1)
		arg = !!arg;

	/* Range-check user-supplied value */
	mask = (1 << width) - 1;
	if (arg & ~mask)
		return -EINVAL;

	/* Update register */
	val &= ~(mask << bit);
	val |= arg << bit;
	pmx_writel(pmx, val, bank, reg);

	return 0;
}
/* Debugfs hooks are intentionally empty stubs. */
static void tegra_pinconf_dbg_show(struct pinctrl_dev *pctldev,
				   struct seq_file *s, unsigned offset)
{
}

static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
					 struct seq_file *s, unsigned selector)
{
}
/*
 * NOTE(review): this table is not declared static, so it becomes a
 * global symbol; nothing visible in this file needs that. Candidate
 * for "static const" — confirm no other translation unit links
 * against it before changing the linkage.
 */
struct pinconf_ops tegra_pinconf_ops = {
	.pin_config_get = tegra_pinconf_get,
	.pin_config_set = tegra_pinconf_set,
	.pin_config_group_get = tegra_pinconf_group_get,
	.pin_config_group_set = tegra_pinconf_group_set,
	.pin_config_dbg_show = tegra_pinconf_dbg_show,
	.pin_config_group_dbg_show = tegra_pinconf_group_dbg_show,
};
/* GPIO range covering all Tegra GPIOs; npins is filled in at probe. */
static struct pinctrl_gpio_range tegra_pinctrl_gpio_range = {
	.name = "Tegra GPIOs",
	.id = 0,
	.base = 0,
};

/* Pin controller descriptor; pins/npins are filled in at probe. */
static struct pinctrl_desc tegra_pinctrl_desc = {
	.name = DRIVER_NAME,
	.pctlops = &tegra_pinctrl_ops,
	.pmxops = &tegra_pinmux_ops,
	.confops = &tegra_pinconf_ops,
	.owner = THIS_MODULE,
};
static struct of_device_id tegra_pinctrl_of_match[] __devinitdata = {
#ifdef CONFIG_PINCTRL_TEGRA20
{
.compatible = "nvidia,tegra20-pinmux-disabled",
.data = tegra20_pinctrl_init,
},
#endif
#ifdef CONFIG_PINCTRL_TEGRA30
{
.compatible = "nvidia,tegra30-pinmux-disabled",
.data = tegra30_pinctrl_init,
},
#endif
{},
};
/*
 * Probe: choose the SoC init function (OF match data, falling back to
 * Tegra20), count and map the register banks, then register the pin
 * controller and its GPIO range.
 */
static int __devinit tegra_pinctrl_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	tegra_pinctrl_soc_initf initf = NULL;
	struct tegra_pmx *pmx;
	struct resource *res;
	int i;

	match = of_match_device(tegra_pinctrl_of_match, &pdev->dev);
	if (match)
		initf = (tegra_pinctrl_soc_initf)match->data;
#ifdef CONFIG_PINCTRL_TEGRA20
	/* Non-DT / unmatched fallback: assume Tegra20. */
	if (!initf)
		initf = tegra20_pinctrl_init;
#endif
	if (!initf) {
		dev_err(&pdev->dev,
			"Could not determine SoC-specific init func\n");
		return -EINVAL;
	}

	pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
	if (!pmx) {
		dev_err(&pdev->dev, "Can't alloc tegra_pmx\n");
		return -ENOMEM;
	}
	pmx->dev = &pdev->dev;

	/* Fill in pmx->soc, then patch the static descriptor tables. */
	(*initf)(&pmx->soc);

	tegra_pinctrl_gpio_range.npins = pmx->soc->ngpios;
	tegra_pinctrl_desc.pins = pmx->soc->pins;
	tegra_pinctrl_desc.npins = pmx->soc->npins;

	/* First pass: count the MEM resources (register banks). */
	for (i = 0; ; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break;
	}
	pmx->nbanks = i;

	pmx->regs = devm_kzalloc(&pdev->dev, pmx->nbanks * sizeof(*pmx->regs),
				 GFP_KERNEL);
	if (!pmx->regs) {
		dev_err(&pdev->dev, "Can't alloc regs pointer\n");
		return -ENODEV;
	}

	/* Second pass: request and map each bank. */
	for (i = 0; i < pmx->nbanks; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			dev_err(&pdev->dev, "Missing MEM resource\n");
			return -ENODEV;
		}

		if (!devm_request_mem_region(&pdev->dev, res->start,
					     resource_size(res),
					     dev_name(&pdev->dev))) {
			dev_err(&pdev->dev,
				"Couldn't request MEM resource %d\n", i);
			return -ENODEV;
		}

		pmx->regs[i] = devm_ioremap(&pdev->dev, res->start,
					    resource_size(res));
		if (!pmx->regs[i]) {
			dev_err(&pdev->dev, "Couldn't ioremap regs %d\n", i);
			return -ENODEV;
		}
	}

	pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx);
	if (IS_ERR(pmx->pctl)) {
		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
		return PTR_ERR(pmx->pctl);
	}

	pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);

	platform_set_drvdata(pdev, pmx);

	dev_dbg(&pdev->dev, "Probed Tegra pinctrl driver\n");

	return 0;
}
/* Unbind: unregister the pin controller (devm frees everything else). */
static int __devexit tegra_pinctrl_remove(struct platform_device *pdev)
{
	struct tegra_pmx *pmx = platform_get_drvdata(pdev);

	pinctrl_unregister(pmx->pctl);

	return 0;
}
/* Platform driver glue. */
static struct platform_driver tegra_pinctrl_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = tegra_pinctrl_of_match,
	},
	.probe = tegra_pinctrl_probe,
	.remove = __devexit_p(tegra_pinctrl_remove),
};
/* arch_initcall: registered earlier than regular module initcalls. */
static int __init tegra_pinctrl_init(void)
{
	return platform_driver_register(&tegra_pinctrl_driver);
}
arch_initcall(tegra_pinctrl_init);

static void __exit tegra_pinctrl_exit(void)
{
	platform_driver_unregister(&tegra_pinctrl_driver);
}
module_exit(tegra_pinctrl_exit);

MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, tegra_pinctrl_of_match);
| gpl-2.0 |
Outernet-Project/outernetrx-linux | drivers/staging/gdm72xx/sdio_boot.c | 2317 | 3501 | /*
* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/firmware.h>
#include "gdm_sdio.h"
#include "sdio_boot.h"
/* "Type A" chunks: 3 length bytes + 1 flag byte prefix each transfer. */
#define TYPE_A_HEADER_SIZE	4
#define TYPE_A_LOOKAHEAD_SIZE	16
#define YMEM0_SIZE		0x8000	/* 32kbytes */
#define DOWNLOAD_SIZE		(YMEM0_SIZE - TYPE_A_HEADER_SIZE)

#define FW_DIR	"gdm72xx/"
#define FW_KRN	"gdmskrn.bin"
#define FW_RFS	"gdmsrfs.bin"

/* Scratch buffer; allocated for the duration of sdio_boot(). */
static u8 *tx_buf;
/*
 * Poll bit 0 of SDIO register 0x13 for up to HZ jiffies (~1 second),
 * yielding the CPU between reads. Returns 1 when the device acked,
 * 0 on timeout.
 */
static int ack_ready(struct sdio_func *func)
{
	unsigned long start = jiffies;
	u8 val;
	int ret;

	while ((jiffies - start) < HZ) {
		/* NOTE(review): the sdio_readb() error code is ignored. */
		val = sdio_readb(func, 0x13, &ret);
		if (val & 0x01)
			return 1;
		schedule();
	}

	return 0;
}
static int download_image(struct sdio_func *func, const char *img_name)
{
int ret = 0, len, pno;
u8 *buf = tx_buf;
loff_t pos = 0;
int img_len;
const struct firmware *firm;
ret = request_firmware(&firm, img_name, &func->dev);
if (ret < 0) {
dev_err(&func->dev,
"requesting firmware %s failed with error %d\n",
img_name, ret);
return ret;
}
buf = kmalloc(DOWNLOAD_SIZE + TYPE_A_HEADER_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
img_len = firm->size;
if (img_len <= 0) {
ret = -1;
goto out;
}
pno = 0;
while (img_len > 0) {
if (img_len > DOWNLOAD_SIZE) {
len = DOWNLOAD_SIZE;
buf[3] = 0;
} else {
len = img_len; /* the last packet */
buf[3] = 2;
}
buf[0] = len & 0xff;
buf[1] = (len >> 8) & 0xff;
buf[2] = (len >> 16) & 0xff;
memcpy(buf+TYPE_A_HEADER_SIZE, firm->data + pos, len);
ret = sdio_memcpy_toio(func, 0, buf, len + TYPE_A_HEADER_SIZE);
if (ret < 0) {
dev_err(&func->dev,
"send image error: packet number = %d ret = %d\n",
pno, ret);
goto out;
}
if (buf[3] == 2) /* The last packet */
break;
if (!ack_ready(func)) {
ret = -EIO;
dev_err(&func->dev, "Ack is not ready.\n");
goto out;
}
ret = sdio_memcpy_fromio(func, buf, 0, TYPE_A_LOOKAHEAD_SIZE);
if (ret < 0) {
dev_err(&func->dev,
"receive ack error: packet number = %d ret = %d\n",
pno, ret);
goto out;
}
sdio_writeb(func, 0x01, 0x13, &ret);
sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */
img_len -= DOWNLOAD_SIZE;
pos += DOWNLOAD_SIZE;
pno++;
}
out:
kfree(buf);
return ret;
}
/*
 * Boot the device: download the kernel image, then (on success) the
 * filesystem image. Returns 0 on success or a negative errno.
 */
int sdio_boot(struct sdio_func *func)
{
	const char *krn_name = FW_DIR FW_KRN;
	const char *rfs_name = FW_DIR FW_RFS;
	int ret;

	tx_buf = kmalloc(YMEM0_SIZE, GFP_KERNEL);
	if (tx_buf == NULL)
		return -ENOMEM;

	ret = download_image(func, krn_name);
	if (ret == 0) {
		dev_info(&func->dev, "GCT: Kernel download success.\n");

		ret = download_image(func, rfs_name);
		if (ret == 0)
			dev_info(&func->dev,
				 "GCT: Filesystem download success.\n");
	}

	kfree(tx_buf);
	return ret;
}
| gpl-2.0 |
ztemt/NX511J_5.1_kernel | drivers/platform/x86/amilo-rfkill.c | 2317 | 4228 | /*
* Support for rfkill on some Fujitsu-Siemens Amilo laptops.
* Copyright 2011 Ben Hutchings.
*
* Based in part on the fsam7440 driver, which is:
* Copyright 2005 Alejandro Vidal Mata & Javier Vidal Mata.
* and on the fsaa1655g driver, which is:
* Copyright 2006 Martin Večeřa.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/i8042.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
/*
 * These values were obtained from disassembling and debugging the
 * PM.exe program installed in the Fujitsu-Siemens AMILO A1655G
 */
#define A1655_WIFI_COMMAND	0x10C5
#define A1655_WIFI_ON		0x25
#define A1655_WIFI_OFF		0x45

/* rfkill set_block hook: toggle the radio via an i8042 KBC command. */
static int amilo_a1655_rfkill_set_block(void *data, bool blocked)
{
	u8 param = blocked ? A1655_WIFI_OFF : A1655_WIFI_ON;
	int rc;

	/* Serialise against other i8042 users while issuing the command. */
	i8042_lock_chip();
	rc = i8042_command(&param, A1655_WIFI_COMMAND);
	i8042_unlock_chip();
	return rc;
}

static const struct rfkill_ops amilo_a1655_rfkill_ops = {
	.set_block = amilo_a1655_rfkill_set_block
};
/*
 * These values were obtained from disassembling the PM.exe program
 * installed in the Fujitsu-Siemens AMILO M 7440
 */
#define M7440_PORT1		0x118f
#define M7440_PORT2		0x118e
#define M7440_RADIO_ON1		0x12
#define M7440_RADIO_ON2		0x80
#define M7440_RADIO_OFF1	0x10
#define M7440_RADIO_OFF2	0x00

/* rfkill set_block hook: program the radio state via two I/O ports. */
static int amilo_m7440_rfkill_set_block(void *data, bool blocked)
{
	u8 val1 = blocked ? M7440_RADIO_OFF1 : M7440_RADIO_ON1;
	u8 val2 = blocked ? M7440_RADIO_OFF2 : M7440_RADIO_ON2;

	outb(val1, M7440_PORT1);
	outb(val2, M7440_PORT2);

	/* Check whether the state has changed correctly */
	if (inb(M7440_PORT1) != val1 || inb(M7440_PORT2) != val2)
		return -EIO;

	return 0;
}

static const struct rfkill_ops amilo_m7440_rfkill_ops = {
	.set_block = amilo_m7440_rfkill_set_block
};
/* DMI board matches; driver_data points at the model's rfkill_ops. */
static const struct dmi_system_id amilo_rfkill_id_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
			DMI_MATCH(DMI_BOARD_NAME, "AMILO A1655"),
		},
		.driver_data = (void *)&amilo_a1655_rfkill_ops
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
			DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
		},
		.driver_data = (void *)&amilo_m7440_rfkill_ops
	},
	{}
};

/* Single-instance device and rfkill handles, created at module init. */
static struct platform_device *amilo_rfkill_pdev;
static struct rfkill *amilo_rfkill_dev;
/*
 * Bind: re-run the DMI match to pick the model's ops, then allocate
 * and register the WLAN rfkill switch.
 */
static int amilo_rfkill_probe(struct platform_device *device)
{
	int rc;
	const struct dmi_system_id *system_id =
		dmi_first_match(amilo_rfkill_id_table);

	if (!system_id)
		return -ENXIO;

	amilo_rfkill_dev = rfkill_alloc(KBUILD_MODNAME, &device->dev,
					RFKILL_TYPE_WLAN,
					system_id->driver_data, NULL);
	if (!amilo_rfkill_dev)
		return -ENOMEM;

	rc = rfkill_register(amilo_rfkill_dev);
	if (rc)
		goto fail;

	return 0;

fail:
	rfkill_destroy(amilo_rfkill_dev);
	return rc;
}
/* Unbind: unregister and free the rfkill switch. */
static int amilo_rfkill_remove(struct platform_device *device)
{
	rfkill_unregister(amilo_rfkill_dev);
	rfkill_destroy(amilo_rfkill_dev);
	return 0;
}
/* Platform driver glue. */
static struct platform_driver amilo_rfkill_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = amilo_rfkill_probe,
	.remove = amilo_rfkill_remove,
};
/*
 * Module init: bail out early on non-matching hardware, then register
 * the platform driver plus a matching platform device.
 */
static int __init amilo_rfkill_init(void)
{
	int rc;

	if (dmi_first_match(amilo_rfkill_id_table) == NULL)
		return -ENODEV;

	rc = platform_driver_register(&amilo_rfkill_driver);
	if (rc)
		return rc;

	amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME, -1,
							    NULL, 0);
	if (IS_ERR(amilo_rfkill_pdev)) {
		rc = PTR_ERR(amilo_rfkill_pdev);
		goto fail;
	}

	return 0;

fail:
	platform_driver_unregister(&amilo_rfkill_driver);
	return rc;
}
/* Module unload: remove the device first, then the driver. */
static void __exit amilo_rfkill_exit(void)
{
	platform_device_unregister(amilo_rfkill_pdev);
	platform_driver_unregister(&amilo_rfkill_driver);
}
/* Module metadata; the DMI table doubles as the autoload (modalias) table. */
MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
module_init(amilo_rfkill_init);
module_exit(amilo_rfkill_exit);
| gpl-2.0 |
DienoX/NightSimple-5.0.2_BOK2_G901F | drivers/net/wireless/mwifiex/11n_rxreorder.c | 2573 | 19583 | /*
* Marvell Wireless LAN device driver: 802.11n RX Re-ordering
*
* Copyright (C) 2011, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_rxreorder.h"
/*
* This function dispatches all packets in the Rx reorder table until the
* start window.
*
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
static void
mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
			 struct mwifiex_rx_reorder_tbl *tbl, int start_win)
{
	int pkt_to_send, i;
	void *rx_tmp_ptr;
	unsigned long flags;

	/* Number of window slots to flush: up to the new start window when
	 * it lies ahead of the current one, otherwise the full window. */
	pkt_to_send = (start_win > tbl->start_win) ?
		      min((start_win - tbl->start_win), tbl->win_size) :
		      tbl->win_size;

	for (i = 0; i < pkt_to_send; ++i) {
		/* Detach the buffered packet under the lock; dispatch it
		 * outside the lock. */
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		rx_tmp_ptr = NULL;
		if (tbl->rx_reorder_ptr[i]) {
			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
			tbl->rx_reorder_ptr[i] = NULL;
		}
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		if (rx_tmp_ptr) {
			if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
				mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
			else
				mwifiex_process_rx_packet(priv, rx_tmp_ptr);
		}
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
	 */
	for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
		tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
		tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
	}

	tbl->start_win = start_win;
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
* This function dispatches all packets in the Rx reorder table until
* a hole is found.
*
* The start window is adjusted automatically when a hole is located.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
			      struct mwifiex_rx_reorder_tbl *tbl)
{
	int i, j, xchg;
	void *rx_tmp_ptr;
	unsigned long flags;

	for (i = 0; i < tbl->win_size; ++i) {
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		/* First empty slot is a hole: stop dispatching there. */
		if (!tbl->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
			break;
		}
		/* Detach under the lock, dispatch outside it. */
		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
		tbl->rx_reorder_ptr[i] = NULL;
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
			mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
		else
			mwifiex_process_rx_packet(priv, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
	 */
	if (i > 0) {
		xchg = tbl->win_size - i;
		for (j = 0; j < xchg; ++j) {
			tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j];
			tbl->rx_reorder_ptr[i + j] = NULL;
		}
	}
	/* Advance the window start modulo the sequence-number space. */
	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
* This function deletes the Rx reorder table and frees the memory.
*
* The function stops the associated timer and dispatches all the
* pending packets in the Rx reorder table before deletion.
*/
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
			     struct mwifiex_rx_reorder_tbl *tbl)
{
	unsigned long flags;

	if (!tbl)
		return;

	/* Flush the entire window before tearing the entry down. */
	mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) &
					    (MAX_TID_VALUE - 1));

	del_timer(&tbl->timer_context.timer);

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_del(&tbl->list);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	kfree(tbl->rx_reorder_ptr);
	kfree(tbl);
}
/*
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
*/
/*
 * Look up the Rx reorder table entry matching the given TID and
 * transmitter address; returns NULL when no entry exists.
 */
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *entry, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry(entry, &priv->rx_reorder_tbl_ptr, list) {
		if (entry->tid == tid && !memcmp(entry->ta, ta, ETH_ALEN)) {
			found = entry;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return found;
}
/* This function retrieves the pointer to an entry in Rx reordering
* table which matches the given TA and deletes it.
*/
/* This function retrieves the pointer to an entry in Rx reordering
 * table which matches the given TA and deletes it.
 */
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl, *tmp;
	unsigned long flags;

	if (!ta)
		return;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
			/* mwifiex_del_rx_reorder_entry() takes this lock
			 * itself and may dispatch packets, so drop it
			 * around the call; _safe iteration keeps the
			 * cursor valid across the deletion. */
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			mwifiex_del_rx_reorder_entry(priv, tbl);
			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
*/
/*
 * Return the index of the highest occupied slot in the reorder window,
 * or -1 when the window holds no buffered packets.
 */
static int
mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr)
{
	int slot;

	/* Scan from the top of the window downwards; the first occupied
	 * slot is the highest buffered offset. */
	for (slot = rx_reorder_tbl_ptr->win_size - 1; slot >= 0; --slot) {
		if (rx_reorder_tbl_ptr->rx_reorder_ptr[slot])
			return slot;
	}

	return -1;
}
/*
* This function flushes all the packets in Rx reordering table.
*
* The function checks if any packets are currently buffered in the
* table or not. In case there are packets available, it dispatches
* them and then dumps the Rx reordering table.
*/
/* Timer callback armed per reorder table (see timer_context setup). */
static void
mwifiex_flush_data(unsigned long context)
{
	struct reorder_tmr_cnxt *ctx =
		(struct reorder_tmr_cnxt *) context;
	int start_win;

	/* Highest occupied slot; negative means nothing is buffered. */
	start_win = mwifiex_11n_find_last_seq_num(ctx->ptr);

	if (start_win < 0)
		return;

	dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win);
	/* Dispatch everything up to and including that slot. */
	mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr,
				 (ctx->ptr->start_win + start_win + 1) &
				 (MAX_TID_VALUE - 1));
}
/*
* This function creates an entry in Rx reordering table for the
* given TA/TID.
*
* The function also initializes the entry with sequence number, window
* size as well as initializes the timer.
*
* If the received TA/TID pair is already present, all the packets are
* dispatched and the window size is moved until the SSN.
*/
static void
mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
				  int tid, int win_size, int seq_num)
{
	struct mwifiex_rx_reorder_tbl *tbl, *new_node;
	u16 last_seq = 0;
	unsigned long flags;
	struct mwifiex_sta_node *node;

	/*
	 * If we get a TID, ta pair which is already present dispatch all the
	 * the packets and move the window size until the ssn
	 */
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (tbl) {
		mwifiex_11n_dispatch_pkt(priv, tbl, seq_num);
		return;
	}
	/* if !tbl then create one */
	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
	if (!new_node)
		return;

	INIT_LIST_HEAD(&new_node->list);
	new_node->tid = tid;
	memcpy(new_node->ta, ta, ETH_ALEN);
	new_node->start_win = seq_num;

	/*
	 * Fetch the last sequence number seen from this peer *before*
	 * logging it; previously the debug line printed last_seq while it
	 * was still zero in the AP path.
	 */
	if (mwifiex_queuing_ra_based(priv)) {
		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
			node = mwifiex_get_sta_entry(priv, ta);
			if (node)
				last_seq = node->rx_seq[tid];
		}
		dev_dbg(priv->adapter->dev,
			"info: AP/ADHOC:last_seq=%d start_win=%d\n",
			last_seq, new_node->start_win);
	} else {
		last_seq = priv->rx_seq[tid];
	}

	/* Skip forward past frames already handed up to the stack. */
	if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
	    last_seq >= new_node->start_win)
		new_node->start_win = last_seq + 1;

	new_node->win_size = win_size;
	new_node->flags = 0;

	/* kcalloc() zeroes the slot array, so no explicit NULL loop and
	 * no open-coded (overflow-prone) size multiplication needed. */
	new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *),
					   GFP_KERNEL);
	if (!new_node->rx_reorder_ptr) {
		kfree(new_node);
		dev_err(priv->adapter->dev,
			"%s: failed to alloc reorder_ptr\n", __func__);
		return;
	}

	new_node->timer_context.ptr = new_node;
	new_node->timer_context.priv = priv;

	init_timer(&new_node->timer_context.timer);
	new_node->timer_context.timer.function = mwifiex_flush_data;
	new_node->timer_context.timer.data =
			(unsigned long) &new_node->timer_context;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
/*
* This function prepares command for adding a BA request.
*
* Preparation includes -
* - Setting command ID and proper size
* - Setting add BA request buffer
* - Ensuring correct endian-ness
*/
/*
 * Wrap a caller-prepared ADDBA request body in a
 * HostCmd_CMD_11N_ADDBA_REQ command frame.  Always returns 0.
 */
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;

	memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
	cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);

	return 0;
}
/*
* This function prepares command for adding a BA response.
*
* Preparation includes -
* - Setting command ID and proper size
* - Setting add BA response buffer
* - Ensuring correct endian-ness
*/
int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
				  struct host_cmd_ds_command *cmd,
				  struct host_cmd_ds_11n_addba_req
				  *cmd_addba_req)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
	u8 tid;
	int win_size;
	uint16_t block_ack_param_set;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
	cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);

	/* Echo the peer's identifying fields back in the response. */
	memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
	       ETH_ALEN);
	add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
	add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
	add_ba_rsp->ssn = cmd_addba_req->ssn;

	block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
	/* Replace the requested buffer size with our own Rx window size. */
	block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;

	/* We donot support AMSDU inside AMPDU, hence reset the bit */
	block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
	block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
				 BLOCKACKPARAM_WINSIZE_POS);
	add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
	/* Read the window size back out of the response field. */
	win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
					& IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
					>> BLOCKACKPARAM_WINSIZE_POS;
	cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);

	/* Create the Rx reorder table now that the session is accepted. */
	mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
					  tid, win_size,
					  le16_to_cpu(cmd_addba_req->ssn));
	return 0;
}
/*
* This function prepares command for deleting a BA request.
*
* Preparation includes -
* - Setting command ID and proper size
* - Setting del BA request buffer
* - Ensuring correct endian-ness
*/
/*
 * Wrap a caller-prepared DELBA body in a HostCmd_CMD_11N_DELBA command
 * frame.  Always returns 0.
 */
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;

	memcpy(del_ba, data_buf, sizeof(*del_ba));

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
	cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);

	return 0;
}
/*
* This function identifies if Rx reordering is needed for a received packet.
*
* In case reordering is required, the function will do the reordering
* before sending it to kernel.
*
* The Rx reorder table is checked first with the received TID/TA pair. If
* not found, the received packet is dispatched immediately. But if found,
* the packet is reordered and all the packets in the updated Rx reordering
* table is dispatched until a hole is found.
*
* For sequence number less than the starting window, the packet is dropped.
*/
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
				u16 seq_num, u16 tid,
				u8 *ta, u8 pkt_type, void *payload)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	int start_win, end_win, win_size;
	u16 pkt_index;

	/* No reorder table for this TID/TA: deliver immediately (BARs
	 * carry no payload to deliver). */
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (!tbl) {
		if (pkt_type != PKT_TYPE_BAR) {
			if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
				mwifiex_handle_uap_rx_forward(priv, payload);
			else
				mwifiex_process_rx_packet(priv, payload);
		}
		return 0;
	}
	start_win = tbl->start_win;
	win_size = tbl->win_size;
	end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
	/* Re-arm the flush timer on every received frame. */
	del_timer(&tbl->timer_context.timer);
	mod_timer(&tbl->timer_context.timer,
		  jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));

	/*
	 * If seq_num is less then starting win then ignore and drop the
	 * packet
	 */
	if (tbl->flags & RXREOR_FORCE_NO_DROP) {
		/* One-shot flag: skip the drop check once after host-sleep. */
		dev_dbg(priv->adapter->dev,
			"RXREOR_FORCE_NO_DROP when HS is activated\n");
		tbl->flags &= ~RXREOR_FORCE_NO_DROP;
	} else {
		/* Drop frames behind the window, accounting for
		 * sequence-number wraparound (TWOPOW11 — presumably half
		 * the sequence space; confirm against MAX_TID_VALUE). */
		if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
			if (seq_num >= ((start_win + TWOPOW11) &
					(MAX_TID_VALUE - 1)) &&
			    seq_num < start_win)
				return -1;
		} else if ((seq_num < start_win) ||
			   (seq_num > (start_win + TWOPOW11))) {
			return -1;
		}
	}

	/*
	 * If this packet is a BAR we adjust seq_num as
	 * WinStart = seq_num
	 */
	if (pkt_type == PKT_TYPE_BAR)
		seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);

	/* Frame beyond the current window: slide the window forward and
	 * flush everything that falls out of it. */
	if (((end_win < start_win) &&
	     (seq_num < start_win) && (seq_num > end_win)) ||
	    ((end_win > start_win) && ((seq_num > end_win) ||
				       (seq_num < start_win)))) {
		end_win = seq_num;
		if (((seq_num - win_size) + 1) >= 0)
			start_win = (end_win - win_size) + 1;
		else
			start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
		mwifiex_11n_dispatch_pkt(priv, tbl, start_win);
	}

	if (pkt_type != PKT_TYPE_BAR) {
		if (seq_num >= start_win)
			pkt_index = seq_num - start_win;
		else
			pkt_index = (seq_num+MAX_TID_VALUE) - start_win;

		/* Duplicate frame for an occupied slot: drop it. */
		if (tbl->rx_reorder_ptr[pkt_index])
			return -1;

		tbl->rx_reorder_ptr[pkt_index] = payload;
	}

	/*
	 * Dispatch all packets sequentially from start_win until a
	 * hole is found and adjust the start_win appropriately
	 */
	mwifiex_11n_scan_and_dispatch(priv, tbl);

	return 0;
}
/*
* This function deletes an entry for a given TID/TA pair.
*
* The TID/TA are taken from del BA event body.
*/
/*
 * Delete the BA table entry for a TID/peer pair taken from a DELBA
 * event.  Which side (Rx reorder table vs. Tx BA stream table) is torn
 * down depends on the event type and on who initiated the DELBA.
 */
void
mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
		   u8 type, int initiator)
{
	struct mwifiex_rx_reorder_tbl *rx_tbl;
	struct mwifiex_tx_ba_stream_tbl *tx_tbl;
	u8 cleanup_rx_reorder_tbl;
	unsigned long flags;

	/* Received DELBA from the initiator -> tear down our Rx side;
	 * otherwise the Tx side. */
	cleanup_rx_reorder_tbl = (type == TYPE_DELBA_RECEIVE) ?
				 !!initiator : !initiator;

	dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
		peer_mac, tid, initiator);

	if (cleanup_rx_reorder_tbl) {
		rx_tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac);
		if (!rx_tbl) {
			dev_dbg(priv->adapter->dev,
				"event: TID, TA not found in table\n");
			return;
		}
		mwifiex_del_rx_reorder_entry(priv, rx_tbl);
	} else {
		tx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
		if (!tx_tbl) {
			dev_dbg(priv->adapter->dev,
				"event: TID, RA not found in table\n");
			return;
		}

		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tx_tbl);
		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	}
}
/*
* This function handles the command response of an add BA response.
*
* Handling includes changing the header fields into CPU format and
* creating the stream, provided the add BA is accepted.
*/
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *resp)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
	int tid, win_size;
	struct mwifiex_rx_reorder_tbl *tbl;
	uint16_t block_ack_param_set;

	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	/*
	 * Check if we had rejected the ADDBA, if yes then do not create
	 * the stream
	 */
	if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
		win_size = (block_ack_param_set &
			IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
			>> BLOCKACKPARAM_WINSIZE_POS;

		dev_dbg(priv->adapter->dev,
			"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
			add_ba_rsp->peer_mac_addr, tid,
			add_ba_rsp->ssn, win_size);
	} else {
		/* Rejected: drop any reorder table created while
		 * generating the response command. */
		dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
			add_ba_rsp->peer_mac_addr, tid);

		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
						     add_ba_rsp->peer_mac_addr);
		if (tbl)
			mwifiex_del_rx_reorder_entry(priv, tbl);
	}

	return 0;
}
/*
* This function handles BA stream timeout event by preparing and sending
* a command to the firmware.
*/
void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
struct host_cmd_ds_11n_batimeout *event)
{
struct host_cmd_ds_11n_delba delba;
memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);
delba.del_ba_param_set |=
cpu_to_le16((u16) event->tid << DELBA_TID_POS);
delba.del_ba_param_set |= cpu_to_le16(
(u16) event->origninator << DELBA_INITIATOR_POS);
delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba);
}
/*
* This function cleans up the Rx reorder table by deleting all the entries
* and re-initializing.
*/
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list) {
		/* Entry deletion takes this lock itself and may dispatch
		 * packets, so drop it around the call. */
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
	mwifiex_reset_11n_rx_seq_num(priv);
}
/*
* This function updates all rx_reorder_tbl's flags.
*/
/*
 * Set the given flags on every Rx reorder table entry of every
 * interface of this adapter (e.g. RXREOR_FORCE_NO_DROP around
 * host-sleep transitions).
 */
void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
{
	struct mwifiex_private *priv;
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long lock_flags;
	int i;

	for (i = 0; i < adapter->priv_num; i++) {
		priv = adapter->priv[i];
		if (!priv)
			continue;

		/* Cheap unlocked fast path; the list is walked under the
		 * lock below regardless. */
		if (list_empty(&priv->rx_reorder_tbl_ptr))
			continue;

		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
		list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
			tbl->flags = flags;
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
	}
}
| gpl-2.0 |
sailwang94/android_kernel_samsung_ks01lte | drivers/md/dm-raid1.c | 2829 | 35010 | /*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm-bio-record.h"
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
* Mirror set structures.
*---------------------------------------------------------------*/
/* Failure classes a mirror leg can record (bit indices in error_type). */
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

/* One leg of the mirror: the backing device plus per-leg error state. */
struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;		/* non-zero => leg has seen errors */
	unsigned long error_type;	/* bitmask of enum dm_raid1_error */
	struct dm_dev *dev;
	sector_t offset;		/* start sector of this leg's data */
};

/* Per-target state: bio queues, region hash, recovery bookkeeping and
 * the trailing flexible array of legs. */
struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];	/* flexible array, one per leg */
};
/* Kick the per-mirror-set daemon workqueue (kmirrord). */
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* Timer callback: clear the pending flag, then wake the daemon. */
static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

/* Wake the daemon after a short delay (HZ/5); the timer_pending bit
 * coalesces concurrent requests so only one timer is ever armed. */
static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

/* NOTE(review): appears to be the region-hash "recovery stopped"
 * callback — confirm at the dm_region_hash_create() call site. */
static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}
/*
 * Append a bio to the read or write queue and wake the daemon on an
 * empty -> non-empty transition.
 */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	struct bio_list *bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&ms->lock, flags);
	was_empty = !bl->head;
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (was_empty)
		wakeup_mirrord(ms);
}

/* Requeue every bio on the given list as a write. */
static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}
/* Minimum mempool size for per-read records. */
#define MIN_READ_RECORDS 20
/* Per-read state: the leg the read went to plus saved bio fields —
 * presumably used to retry the read on another leg; confirm against
 * the end_io path. */
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;
/*
* Every mirror should look like this one.
*/
#define DEFAULT_MIRROR 0
/*
* This is yucky. We squirrel the mirror struct away inside
* bi_next for read/write buffers. This is safe since the bh
* doesn't get submitted to the lower levels of block layer.
*/
/* Retrieve the mirror stashed in bi_next (see comment above). */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

/* Stash a mirror pointer in bi_next while the bio is in flight. */
static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

/* Current default (primary) leg, indexed by the atomic default_mirror. */
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

/* Make @m the default leg by storing its array index. */
static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}
/* Return the first leg with no recorded errors, or NULL if all failed. */
static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m = ms->mirror;
	struct mirror *end = ms->mirror + ms->nr_mirrors;

	for (; m < end; m++) {
		if (!atomic_read(&m->error_count))
			return m;
	}

	return NULL;
}
/* fail_mirror
* @m: mirror device to fail
* @error_type: one of the enum's, DM_RAID1_*_ERROR
*
* If errors are being handled, record the type of
* error encountered for this device. If this type
* of error has already been recorded, we can return;
* otherwise, we must signal userspace by triggering
* an event. Additionally, if the device is the
* primary device, we must choose a new primary, but
* only if the mirror is in-sync.
*
* This function must not block.
*/
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	/* Only the first occurrence of each error type proceeds to
	 * raise a userspace event. */
	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	/* Default leg failed while in sync: promote a healthy leg. */
	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
/* Issue an empty FLUSH to every leg; fail legs whose flush errored. */
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	/* NOTE(review): on-stack VLA sized by nr_mirrors — small in
	 * practice, but worth confirming the upper bound. */
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	/* Zero-length region per device: flush only, no data. */
	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		/* One error bit per leg, in array order. */
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
/*-----------------------------------------------------------------
* Recovery.
*
* When a mirror is first activated we may find that some regions
* are in the no-sync state. We have to recover these by
* recopying from the default mirror to all the others.
*---------------------------------------------------------------*/
/* kcopyd completion callback for a region recovery (see recover()). */
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	/* Recovery succeeded only if neither side reported an error. */
	dm_rh_recovery_end(reg, !(read_err || write_err));
}
/*
 * Copy one region from the default leg to every other leg via kcopyd;
 * recovery_complete() runs when the copy finishes.
 */
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}
/* Drive recovery: quiesce regions, copy each quiesced region, and mark
 * the set in-sync once the dirty log reports all regions synced. */
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}
/*-----------------------------------------------------------------
* Reads
*---------------------------------------------------------------*/
/*
 * Walk backwards (with wraparound) from the default leg until a leg
 * with no recorded errors is found; NULL if every leg has failed.
 *
 * NOTE(review): the @sector argument is currently unused — no
 * per-sector read balancing happens here.
 */
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}
/* True iff the current default leg has recorded no errors. */
static int default_ok(struct mirror *m)
{
	return !atomic_read(&get_default_mirror(m->ms)->error_count);
}

/* A read can move to another leg only if its region is in sync and at
 * least one error-free leg exists. */
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (!log->type->in_sync(log, region, 0))
		return 0;

	return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
}
/*
* remap a buffer to a particular mirror.
*/
/* Translate a bio's sector into the leg's address space; empty bios
 * (e.g. flushes) carry no data and map to sector 0. */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;

	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}

/* Redirect a bio at the given leg's device. */
static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_sector = map_sector(m, bio);
	bio->bi_bdev = m->dev->bdev;
}

/* Fill a dm-io region describing this bio on the given leg. */
static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
	io->bdev = m->dev->bdev;
}
/* Park a bio on the holds list until suspend completes; if already
 * suspended, finish it immediately (requeue or error). */
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}
/*-----------------------------------------------------------------
* Reads
*---------------------------------------------------------------*/
/* dm-io completion for an async read; retries on another leg if the
 * read failed and an alternative is usable. */
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	/* Recover the leg this read was sent to (stashed in bi_next). */
	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}
/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
struct dm_io_region io;
struct dm_io_request io_req = {
.bi_rw = READ,
.mem.type = DM_IO_BVEC,
.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
.notify.fn = read_callback,
.notify.context = bio,
.client = m->ms->io_client,
};
map_region(&io, m, bio);
bio_set_m(bio, m);
BUG_ON(dm_io(&io_req, 1, &io, NULL));
}
/* A region counts as in-sync when the region hash reports it CLEAN or
 * DIRTY (NOSYNC/RECOVERING states do not qualify). */
static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);

	return (state == DM_RH_CLEAN) || (state == DM_RH_DIRTY);
}
/* Drain the read queue: pick a leg per bio and issue an async read. */
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;	/* default leg failed, no fallback */

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}
/*-----------------------------------------------------------------
* Writes.
*
* We do different things with the write io depending on the
* state of the region that it's in:
*
* SYNC: increment pending, use kcopyd to write to *all* mirrors
* RECOVERING: delay the io until recovery completes
* NOSYNC: increment pending, just write to the default mirror
*---------------------------------------------------------------*/
/*
 * dm-io completion callback for a mirrored write.
 *
 * @error:   bitset from dm-io; bit i set means leg i failed the write
 * @context: the original struct bio
 *
 * On success the bio is completed.  On failure every failed leg is
 * marked and the bio is queued on ms->failures for the worker thread,
 * because raising a dm event can block and we are in completion
 * context here.
 *
 * Fix: the old code carried a local `ret` that was initialized to 0 and
 * never modified before being passed to bio_endio(); the dead variable
 * is removed.
 */
static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	/* Mark every leg whose bit is set in the error bitset. */
	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event. Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}
/*
 * Issue @bio as an asynchronous write to *all* mirror legs via dm-io.
 * Completion is reported through write_callback().
 */
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	/* NOTE(review): VLA sized by nr_mirrors (bounded by table args). */
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	/* Discards carry no payload; switch to a NULL kmem source. */
	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	/* Build one destination region per leg. */
	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
/*
 * Classify and dispatch all queued writes.
 *
 * Routing by region state:
 *   CLEAN/DIRTY (and all flushes/discards) -> sync: write to all legs,
 *   NOSYNC                                 -> write default leg only,
 *   RECOVERING                             -> delay until recovered,
 *   remotely recovering (cluster log)      -> requeue onto ms->writes.
 */
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		/* Flushes and discards are always handled as sync writes. */
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable. We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		/* Log can't be trusted: hold sync writes for userspace. */
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
			/* A leg has failed; let userspace decide first. */
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}
/*
 * Process bios that completed with a write failure (or were held back
 * because of a log failure).  Runs in the kmirrord worker thread so it
 * may block (marking regions, raising events via hold_bio()).
 */
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list. We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core. This give userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes. If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices. It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			/* Log still works: record the region as out-of-sync. */
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}
/* Deferred-work handler: report a mirror event to the dm core. */
static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms;

	ms = container_of(work, struct mirror_set, trigger_event);
	dm_table_event(ms->ti->table);
}
/*-----------------------------------------------------------------
* kmirrord
*---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
struct mirror_set *ms = container_of(work, struct mirror_set,
kmirrord_work);
struct bio_list reads, writes, failures;
unsigned long flags;
spin_lock_irqsave(&ms->lock, flags);
reads = ms->reads;
writes = ms->writes;
failures = ms->failures;
bio_list_init(&ms->reads);
bio_list_init(&ms->writes);
bio_list_init(&ms->failures);
spin_unlock_irqrestore(&ms->lock, flags);
dm_rh_update_states(ms->rh, errors_handled(ms));
do_recovery(ms);
do_reads(ms, &reads);
do_writes(ms, &writes);
do_failures(ms, &failures);
}
/*-----------------------------------------------------------------
* Target functions
*---------------------------------------------------------------*/
/*
 * Allocate and initialize a mirror_set for @nr_mirrors legs.
 *
 * Sets ti->error and returns NULL on failure, unwinding any resources
 * already acquired (read-record pool, io client).  On success the set
 * owns a mempool, a dm-io client and a region hash; free_context()
 * releases them.
 */
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	/* One allocation covers the set plus its trailing mirror array. */
	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}
/*
 * Release a mirror_set: put the first @m legs' devices (in reverse
 * order) and tear down the io client, region hash and mempool.
 */
static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m > 0)
		dm_put_device(ti, ms->mirror[--m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
unsigned int mirror, char **argv)
{
unsigned long long offset;
char dummy;
if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
ti->error = "Invalid offset";
return -EINVAL;
}
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&ms->mirror[mirror].dev)) {
ti->error = "Device lookup failure";
return -ENXIO;
}
ms->mirror[mirror].ms = ms;
atomic_set(&(ms->mirror[mirror].error_count), 0);
ms->mirror[mirror].error_type = 0;
ms->mirror[mirror].offset = offset;
return 0;
}
/*
* Create dirty log: log_type #log_params <log_params>
*/
/*
 * Create dirty log: log_type #log_params <log_params>
 *
 * Parses the leading log arguments from @argv, creates the dirty log
 * and stores the number of argv slots consumed in *args_used.  Sets
 * ti->error and returns NULL on failure.
 *
 * Fix: the argument-count scan read its target through a corrupted
 * token ("¶m_count", mojibake for "&param_count"); the correct
 * address-of expression is restored so the code compiles.
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}
/*
 * Parse the optional trailing feature arguments:
 *   [#features <feature>...]
 *
 * Currently only "handle_errors" is recognised.  *args_used reports
 * how many argv slots were consumed.  Returns 0 on success or
 * -EINVAL with ti->error set.
 */
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;

	*args_used = 0;

	/* No feature section at all is valid. */
	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
/*
* Construct a mirror mapping:
*
* log_type #log_params <log_params>
* #mirrors [mirror_path offset]{2,}
* [#features <features>]
*
* log_type is "core" or "disk"
* #log_params is between 1 and 3
*
* If present, features must be "handle_errors".
*/
/*
 * Target constructor.  Argument layout (see comment block above):
 *   log_type #log_params <log_params>
 *   #mirrors [mirror_path offset]{2,}
 *   [#features <features>]
 *
 * Builds the dirty log, the mirror_set, the kmirrord workqueue and the
 * kcopyd client, unwinding in reverse order on any failure.
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	/* Each leg needs a path and an offset. */
	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			/* free_context() puts the m legs acquired so far. */
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	ms->kmirrord_wq = alloc_workqueue("kmirrord",
					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	/* Kick the worker so recovery can begin immediately. */
	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
/* Target destructor: drain all outstanding work, then free everything. */
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;

	/* Quiesce the delayed-wake timer and both work items first. */
	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work_sync(&ms->trigger_event);

	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
/*
* Mirror mapping function
*/
/*
 * Mirror mapping function
 *
 * Writes are always queued to the worker thread.  Reads of in-sync
 * regions are remapped directly to a chosen leg; out-of-sync reads are
 * queued (READA is refused with -EWOULDBLOCK rather than queued).
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/* Non-blocking in-sync check; -EWOULDBLOCK means "unknown". */
	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	/* Best effort: proceed without retry info if allocation fails. */
	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}
/*
 * End-io hook.  For writes: drop the region's pending count (flushes
 * carry no region).  For failed direct reads: mark the leg, and if the
 * bio was recorded by mirror_map(), restore it and requeue for a retry
 * on another leg (returning 1 tells dm the I/O is not finished).
 */
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & REQ_FLUSH))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	/* Readahead may legitimately fail with -EWOULDBLOCK. */
	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			/* Rewind the bio to its original state before retry. */
			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}
/*
 * Presuspend hook: block new holds, flush the existing hold list,
 * stop recovery, quiesce the dirty log and drain the worker thread so
 * that no I/O is in flight when the table is suspended.
 */
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list to start recovery waiting
	 * for bios in the hold list. After the process, no bio has
	 * a chance to be added in the hold list because ms->suspend
	 * is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}
/* Postsuspend hook: give the dirty log its postsuspend notification. */
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend) {
		if (log->type->postsuspend(log))
			/* FIXME: need better error handling */
			DMWARN("log postsuspend failed");
	}
}
/* Resume hook: re-enable holds, resume the log and restart recovery. */
static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/* Allow bios onto the hold list again. */
	atomic_set(&ms->suspend, 0);

	if (log->type->resume) {
		if (log->type->resume(log))
			/* FIXME: need better error handling */
			DMWARN("log resume failed");
	}

	dm_rh_start_recovery(ms->rh);
}
/*
* device_status_char
* @m: mirror device/leg we want the status of
*
* We return one character representing the most severe error
* we have encountered.
* A => Alive - No failures
* D => Dead - A write failure occurred leaving mirror out-of-sync
* S => Sync - A sychronization failure occurred, mirror out-of-sync
* R => Read - A read failure occurred, mirror data unaffected
*
* Returns: <char>
*/
/*
 * Return the one-character status of a leg, reporting the most severe
 * recorded error: 'A' alive, 'F' flush, 'D' write, 'S' sync, 'R' read,
 * 'U' unknown (error count nonzero but no known bit set).
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&m->error_count))
		return 'A';

	if (test_bit(DM_RAID1_FLUSH_ERROR, &m->error_type))
		return 'F';
	if (test_bit(DM_RAID1_WRITE_ERROR, &m->error_type))
		return 'D';
	if (test_bit(DM_RAID1_SYNC_ERROR, &m->error_type))
		return 'S';
	if (test_bit(DM_RAID1_READ_ERROR, &m->error_type))
		return 'R';

	return 'U';
}
/*
 * Report target status.  INFO emits leg names plus a per-leg health
 * string and sync progress; TABLE emits the constructor arguments.
 */
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	/* NOTE(review): VLA sized by nr_mirrors (bounded by table args). */
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
/*
 * Invoke @fn on every leg's device; stop and return the first nonzero
 * callback result, or 0 if all callbacks succeed.
 */
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	unsigned i;

	for (i = 0; i < ms->nr_mirrors; i++) {
		int ret = fn(ti, ms->mirror[i].dev, ms->mirror[i].offset,
			     ti->len, data);
		if (ret)
			return ret;
	}

	return 0;
}
/* Target registration table for the "mirror" device-mapper target. */
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};
/*
 * Module init: create the read-record slab cache, then register the
 * mirror target, unwinding the cache if registration fails.
 */
static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		return -ENOMEM;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		kmem_cache_destroy(_dm_raid1_read_record_cache);
		return r;
	}

	return 0;
}
/* Module exit: unregister the target before destroying its cache. */
static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}
/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);
MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TeamMac/android_kernel_huawei_p6-u06 | arch/arm/plat-s3c24xx/gpio.c | 3853 | 2489 | /* linux/arch/arm/plat-s3c24xx/gpio.c
*
* Copyright (c) 2004-2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX GPIO support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/gpio-fns.h>
#include <asm/irq.h>
#include <mach/regs-gpio.h>
#include <plat/gpio-core.h>
/* gpiolib wrappers until these are totally eliminated */
/*
 * Legacy wrapper around the gpiolib pull-configuration API.
 *
 * @pin: S3C24XX pin number
 * @to:  legacy selector; as written, to == 0 enables a pull (up
 *       preferred, down as fallback) and nonzero disables the pull.
 *       NOTE(review): the polarity looks inverted relative to the
 *       function name -- confirm against remaining callers; the
 *       WARN_ON below fires for every nonzero @to.
 */
void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
{
	int ret;

	WARN_ON(to);	/* should be none of these left */

	if (!to) {
		/* if pull is enabled, try first with up, and if that
		 * fails, try using down */

		ret = s3c_gpio_setpull(pin, S3C_GPIO_PULL_UP);
		if (ret)
			s3c_gpio_setpull(pin, S3C_GPIO_PULL_DOWN);
	} else {
		s3c_gpio_setpull(pin, S3C_GPIO_PULL_NONE);
	}
}

EXPORT_SYMBOL(s3c2410_gpio_pullup);
/*
 * Legacy wrapper: drive @pin to level @to through gpiolib.  The pin is
 * requested only for the duration of the call.
 */
void s3c2410_gpio_setpin(unsigned int pin, unsigned int to)
{
	/* do this via gpiolib until all users removed */

	gpio_request(pin, "temporary");
	gpio_set_value(pin, to);
	gpio_free(pin);
}

EXPORT_SYMBOL(s3c2410_gpio_setpin);
/*
 * Legacy wrapper: read the raw level of @pin.
 *
 * Returns the pin's bit from the register at chip->base + 0x04
 * (presumably the GPIO data register on S3C24XX -- confirm against
 * regs-gpio.h), i.e. nonzero if the pin reads high, 0 if low.
 * NOTE(review): no NULL check on the chip lookup.
 */
unsigned int s3c2410_gpio_getpin(unsigned int pin)
{
	struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
	unsigned long offs = pin - chip->chip.base;

	return __raw_readl(chip->base + 0x04) & (1<< offs);
}

EXPORT_SYMBOL(s3c2410_gpio_getpin);
/*
 * Atomically update the MISCCR register.
 *
 * @clear:  bits to clear first
 * @change: bits to toggle (XOR) after clearing
 *
 * The read-modify-write runs with local interrupts disabled so it is
 * safe against interrupt-context callers on the same CPU.  Returns the
 * new register value.
 */
unsigned int s3c2410_modify_misccr(unsigned int clear, unsigned int change)
{
	unsigned long flags;
	unsigned long misccr;

	local_irq_save(flags);
	misccr = __raw_readl(S3C24XX_MISCCR);
	misccr &= ~clear;
	misccr ^= change;
	__raw_writel(misccr, S3C24XX_MISCCR);
	local_irq_restore(flags);

	return misccr;
}

EXPORT_SYMBOL(s3c2410_modify_misccr);
| gpl-2.0 |
dh-electronics/linux-imx25 | arch/alpha/oprofile/op_model_ev6.c | 4877 | 2692 | /**
* @file arch/alpha/oprofile/op_model_ev6.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author Richard Henderson <rth@twiddle.net>
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include "op_impl.h"
/* Compute all of the registers in preparation for enabling profiling. */
/*
 * Compute all of the registers in preparation for enabling profiling.
 *
 * @reg: output register configuration consumed by ev6_cpu_setup()
 * @ctr: per-counter user configuration (enable, event, count)
 * @sys: system-wide configuration (unused on EV6)
 */
static void
ev6_reg_setup(struct op_register_config *reg,
	      struct op_counter_config *ctr,
	      struct op_system_config *sys)
{
	unsigned long ctl, reset, need_reset, i;

	/* Select desired events.  We've mapped the event numbers
	   such that they fit directly into the event selection fields.  */
	ctl = 0;
	if (ctr[0].enabled && ctr[0].event)
		ctl |= (ctr[0].event & 1) << 4;
	if (ctr[1].enabled)
		ctl |= (ctr[1].event - 2) & 15;
	reg->mux_select = ctl;

	/* Select logging options.  */
	/* ??? Need to come up with some mechanism to trace only
	   selected processes.  EV6 does not have a mechanism to
	   select kernel or user mode only.  For now, enable always.  */
	reg->proc_mode = 0;

	/* EV6 cannot change the width of the counters as with the
	   other implementations.  But fortunately, we can write to
	   the counters and set the value such that it will overflow
	   at the right time.  */
	reset = need_reset = 0;
	for (i = 0; i < 2; ++i) {
		unsigned long count = ctr[i].count;
		if (!ctr[i].enabled)
			continue;

		/* Counts are clamped to the 20-bit hardware range. */
		if (count > 0x100000)
			count = 0x100000;
		ctr[i].count = count;

		/* Preload so the counter overflows after `count` events;
		   the two preload fields live at different bit offsets. */
		reset |= (0x100000 - count) << (i ? 6 : 28);
		if (count != 0x100000)
			need_reset |= 1 << i;
	}
	reg->reset_values = reset;
	reg->need_reset = need_reset;
}
/* Program all of the registers in preparation for enabling profiling. */
/* Program all of the registers in preparation for enabling profiling. */
static void
ev6_cpu_setup (void *x)
{
	struct op_register_config *cfg = x;

	wrperfmon(2, cfg->mux_select);		/* event selection */
	wrperfmon(3, cfg->proc_mode);		/* mode selection */
	wrperfmon(6, cfg->reset_values | 3);	/* counter preload values */
}
/* CTR is a counter for which the user has requested an interrupt count
in between one of the widths selectable in hardware. Reset the count
for CTR to the value stored in REG->RESET_VALUES. */
/* CTR is a counter for which the user has requested an interrupt count
   in between one of the widths selectable in hardware.  Reset the count
   for CTR to the value stored in REG->RESET_VALUES.  */
static void
ev6_reset_ctr(struct op_register_config *reg, unsigned long ctr)
{
	unsigned long which = 1 << ctr;

	wrperfmon(6, reg->reset_values | which);
}
/*
 * Performance-counter overflow handler: record one sample for the
 * counter that fired.  @ctr is unused on EV6.
 */
static void
ev6_handle_interrupt(unsigned long which, struct pt_regs *regs,
		     struct op_counter_config *ctr)
{
	/* Record the sample.  */
	oprofile_add_sample(regs, which);
}
/* Model descriptor exported to the Alpha oprofile core. */
struct op_axp_model op_model_ev6 = {
	.reg_setup		= ev6_reg_setup,
	.cpu_setup		= ev6_cpu_setup,
	.reset_ctr		= ev6_reset_ctr,
	.handle_interrupt	= ev6_handle_interrupt,
	.cpu_type		= "alpha/ev6",
	.num_counters		= 2,
	.can_set_proc_mode	= 0,
};
| gpl-2.0 |
SerenityS/IM-A800S_Kernel | drivers/isdn/divert/divert_init.c | 5133 | 2399 | /* $Id divert_init.c,v 1.5.6.2 2001/01/24 22:18:17 kai Exp $
*
* Module init for DSS1 diversion services for i4l.
*
* Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include "isdn_divert.h"
MODULE_DESCRIPTION("ISDN4Linux: Call diversion support");
MODULE_AUTHOR("Werner Cornelius");
MODULE_LICENSE("GPL");
/****************************************/
/* structure containing interface to hl */
/****************************************/
/****************************************/
/* structure containing interface to hl */
/****************************************/
/* Shared with the lower layer via DIVERT_REG_NAME(); the cmd field is
 * flipped to DIVERT_CMD_REL on unload (see divert_exit). */
isdn_divert_if divert_if =
{ DIVERT_IF_MAGIC,	/* magic value */
  DIVERT_CMD_REG,	/* register cmd */
  ll_callback,		/* callback routine from ll */
  NULL,			/* command still not specified */
  NULL,			/* drv_to_name */
  NULL,			/* name_to_drv */
};
/*************************/
/* Module interface code */
/* no cmd line parms */
/*************************/
/*
 * Module init: create the divert device node, then register the
 * interface structure with the lower layer; undo the device on
 * registration failure.
 */
static int __init divert_init(void)
{
	int rc;

	if (divert_dev_init()) {
		printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
		return (-EIO);
	}

	rc = DIVERT_REG_NAME(&divert_if);
	if (rc != DIVERT_NO_ERR) {
		divert_dev_deinit();
		printk(KERN_WARNING "dss1_divert: error %d registering module, not loaded\n", rc);
		return (-EIO);
	}

	printk(KERN_INFO "dss1_divert module successfully installed\n");
	return (0);
}
/**********************/
/* Module deinit code */
/**********************/
/*
 * Module exit: deregister from the lower layer and remove the device
 * node under divert_lock, then free all rules and pending processes.
 * If either step fails the unload is abandoned with a warning (the
 * module may then be left partially torn down).
 */
static void __exit divert_exit(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&divert_lock, flags);
	divert_if.cmd = DIVERT_CMD_REL; /* release */
	if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR)
	{ printk(KERN_WARNING "dss1_divert: error %d releasing module\n",i);
	  spin_unlock_irqrestore(&divert_lock, flags);
	  return;
	}
	if (divert_dev_deinit())
	{ printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
	  spin_unlock_irqrestore(&divert_lock, flags);
	  return;
	}
	spin_unlock_irqrestore(&divert_lock, flags);
	deleterule(-1); /* delete all rules and free mem */
	deleteprocs();
	printk(KERN_INFO "dss1_divert module successfully removed \n");
}
module_init(divert_init);
module_exit(divert_exit);
| gpl-2.0 |
sbreen94/Zeus_D2vzw | drivers/isdn/hardware/eicon/mntfunc.c | 5133 | 8521 | /* $Id: mntfunc.c,v 1.19.6.4 2005/01/31 12:22:20 armin Exp $
*
* Driver for Eicon DIVA Server ISDN cards.
* Maint module
*
* Copyright 2000-2003 by Armin Schindler (mac@melware.de)
* Copyright 2000-2003 Cytronics & Melware (info@melware.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include "platform.h"
#include "di_defs.h"
#include "divasync.h"
#include "debug_if.h"
extern char *DRIVERRELEASE_MNT;
#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
extern void DIVA_DIDD_Read(void *, int);
static dword notify_handle;
static DESCRIPTOR DAdapter;
static DESCRIPTOR MAdapter;
static DESCRIPTOR MaintDescriptor =
{ IDI_DIMAINT, 0, 0, (IDI_CALL) diva_maint_prtComp };
extern int diva_os_copy_to_user(void *os_handle, void __user *dst,
const void *src, int length);
extern int diva_os_copy_from_user(void *os_handle, void *dst,
const void __user *src, int length);
/*
 * No-op printf stand-in installed as `dprintf` whenever the DIMAINT
 * debug adapter is absent (see didd_callback).
 */
static void no_printf(unsigned char *x, ...)
{
	/* dummy debug function */
}
#include "debuglib.c"
/*
* DIDD callback function
*/
/*
 * DIDD callback function
 *
 * Invoked by the DIDD layer on adapter arrival/removal.  Tracks the
 * DIMAINT debug adapter (installing/removing the dprintf hook) and
 * forwards XDI adapter changes (types 1..15) to the maint core.
 */
static void *didd_callback(void *context, DESCRIPTOR * adapter,
			   int removal)
{
	if (adapter->type == IDI_DADAPTER) {
		/* The D-adapter itself should never change at runtime. */
		DBG_ERR(("cb: Change in DAdapter ? Oops ?."));
	} else if (adapter->type == IDI_DIMAINT) {
		if (removal) {
			/* Debug adapter gone: fall back to the no-op printf. */
			DbgDeregister();
			memset(&MAdapter, 0, sizeof(MAdapter));
			dprintf = no_printf;
		} else {
			memcpy(&MAdapter, adapter, sizeof(MAdapter));
			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
			DbgRegister("MAINT", DRIVERRELEASE_MNT, DBG_DEFAULT);
		}
	} else if ((adapter->type > 0) && (adapter->type < 16)) {
		if (removal) {
			diva_mnt_remove_xdi_adapter(adapter);
		} else {
			diva_mnt_add_xdi_adapter(adapter);
		}
	}
	return (NULL);
}
/*
* connect to didd
*/
/*
 * connect to didd
 *
 * Scan the DIDD table: register for adapter notifications and register
 * this MAINT driver with the D-adapter; hand any existing XDI adapters
 * (types 1..15) to the maint core.  Returns nonzero iff a D-adapter
 * was found and both registrations succeeded.
 */
static int DIVA_INIT_FUNCTION connect_didd(void)
{
	int x = 0;
	int dadapter = 0;
	IDI_SYNC_REQ req;
	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];

	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));

	for (x = 0; x < MAX_DESCRIPTORS; x++) {
		if (DIDD_Table[x].type == IDI_DADAPTER) {	/* DADAPTER found */
			dadapter = 1;
			memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
			req.didd_notify.e.Req = 0;
			req.didd_notify.e.Rc =
			    IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
			req.didd_notify.info.callback = (void *)didd_callback;
			req.didd_notify.info.context = NULL;
			DAdapter.request((ENTITY *) & req);
			/* Rc == 0xff signals success for sync requests. */
			if (req.didd_notify.e.Rc != 0xff)
				return (0);
			notify_handle = req.didd_notify.info.handle;
			/* Register MAINT (me) */
			req.didd_add_adapter.e.Req = 0;
			req.didd_add_adapter.e.Rc =
			    IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
			req.didd_add_adapter.info.descriptor =
			    (void *) &MaintDescriptor;
			DAdapter.request((ENTITY *) & req);
			if (req.didd_add_adapter.e.Rc != 0xff)
				return (0);
		} else if ((DIDD_Table[x].type > 0)
			   && (DIDD_Table[x].type < 16)) {
			diva_mnt_add_xdi_adapter(&DIDD_Table[x]);
		}
	}
	return (dadapter);
}
/*
* disconnect from didd
*/
/*
 * disconnect from didd
 *
 * Undo connect_didd(): drop the adapter-change notification (using the
 * handle saved at registration) and deregister the MAINT descriptor.
 */
static void DIVA_EXIT_FUNCTION disconnect_didd(void)
{
	IDI_SYNC_REQ req;

	req.didd_notify.e.Req = 0;
	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
	req.didd_notify.info.handle = notify_handle;
	DAdapter.request((ENTITY *) & req);

	req.didd_remove_adapter.e.Req = 0;
	req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER;
	req.didd_remove_adapter.info.p_request =
	    (IDI_CALL) MaintDescriptor.request;
	DAdapter.request((ENTITY *) & req);
}
/*
* read/write maint
*/
/*
 * maint_read_write - single command entry point of the MAINT driver.
 *
 * The first 12 bytes of the user buffer @buf carry three dwords:
 * command, driver id, and a command-specific "mask or size" value.
 * Depending on the command the rest of the buffer is used as input
 * (trace filter string) or as output (driver info, debug mask, trace
 * messages).
 *
 * @buf:   user-space buffer (header + payload)
 * @count: size of @buf in bytes
 *
 * Returns the number of payload bytes produced on success, or a
 * negative errno value on failure.
 */
int maint_read_write(void __user *buf, int count)
{
	byte data[128];
	dword cmd, id, mask;
	int ret = 0;

	/* A request must at least contain the three header dwords. */
	if (count < (3 * sizeof(dword)))
		return (-EFAULT);
	if (diva_os_copy_from_user(NULL, (void *) &data[0],
				   buf, 3 * sizeof(dword))) {
		return (-EFAULT);
	}

	cmd = *(dword *) &data[0];	/* command */
	id = *(dword *) &data[4];	/* driver id */
	mask = *(dword *) &data[8];	/* mask or size */

	switch (cmd) {
	case DITRACE_CMD_GET_DRIVER_INFO:
		/* Copy the info block for driver @id back to the caller. */
		if ((ret = diva_get_driver_info(id, data, sizeof(data))) > 0) {
			if ((count < ret) || diva_os_copy_to_user
			    (NULL, buf, (void *) &data[0], ret))
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
		break;

	case DITRACE_READ_DRIVER_DBG_MASK:
		/* Read back the current debug mask of driver @id. */
		if ((ret = diva_get_driver_dbg_mask(id, (byte *) data)) > 0) {
			if ((count < ret) || diva_os_copy_to_user
			    (NULL, buf, (void *) &data[0], ret))
				ret = -EFAULT;
		} else {
			ret = -ENODEV;
		}
		break;

	case DITRACE_WRITE_DRIVER_DBG_MASK:
		/* @mask is the new debug mask for driver @id. */
		if ((ret = diva_set_driver_dbg_mask(id, mask)) <= 0) {
			ret = -ENODEV;
		}
		break;

		/*
		  Filter commands will ignore the ID due to fact that filtering affects
		  the B- channel and Audio Tap trace levels only. Also MAINT driver will
		  select the right trace ID by itself
		*/
	case DITRACE_WRITE_SELECTIVE_TRACE_FILTER:
		/* @mask is the filter string length; 0 resets to "*" (all). */
		if (!mask) {
			ret = diva_set_trace_filter (1, "*");
		} else if (mask < sizeof(data)) {
			/* Filter string follows the 12-byte header. */
			if (diva_os_copy_from_user(NULL, data, (char __user *)buf+12, mask)) {
				ret = -EFAULT;
			} else {
				ret = diva_set_trace_filter ((int)mask, data);
			}
		} else {
			ret = -EINVAL;
		}
		break;

	case DITRACE_READ_SELECTIVE_TRACE_FILTER:
		/* Return the currently active filter string. */
		if ((ret = diva_get_trace_filter (sizeof(data), data)) > 0) {
			if (diva_os_copy_to_user (NULL, buf, data, ret))
				ret = -EFAULT;
		} else {
			ret = -ENODEV;
		}
		break;

	case DITRACE_READ_TRACE_ENTRY:{
		/* Fetch a single trace message; @mask is the max size. */
		diva_os_spin_lock_magic_t old_irql;
		word size;
		diva_dbg_entry_head_t *pmsg;
		byte *pbuf;

		if (!(pbuf = diva_os_malloc(0, mask))) {
			return (-ENOMEM);
		}

		/* "Loop" runs at most once; it exists for the breaks. */
		for(;;) {
			if (!(pmsg =
			      diva_maint_get_message(&size, &old_irql))) {
				break;	/* queue empty, ret stays 0 */
			}
			if (size > mask) {
				/* Message does not fit: leave it queued (ack 0). */
				diva_maint_ack_message(0, &old_irql);
				ret = -EINVAL;
				break;
			}
			ret = size;
			memcpy(pbuf, pmsg, size);
			/* Consume the message (ack 1) before copying out. */
			diva_maint_ack_message(1, &old_irql);
			if ((count < size) ||
			    diva_os_copy_to_user (NULL, buf, (void *) pbuf, size))
				ret = -EFAULT;
			break;
		}
		diva_os_free(0, pbuf);
	}
		break;

	case DITRACE_READ_TRACE_ENTRYS:{
		/*
		 * Fetch as many trace messages as fit into @mask bytes.
		 * Each message is framed as a 4-byte little-endian length
		 * followed by the payload; a zero length terminates the
		 * stream.
		 */
		diva_os_spin_lock_magic_t old_irql;
		word size;
		diva_dbg_entry_head_t *pmsg;
		byte *pbuf = NULL;
		int written = 0;

		if (mask < 4096) {
			ret = -EINVAL;
			break;
		}
		if (!(pbuf = diva_os_malloc(0, mask))) {
			return (-ENOMEM);
		}

		for (;;) {
			if (!(pmsg =
			      diva_maint_get_message(&size, &old_irql))) {
				break;
			}
			/*
			 * size+8 (not size+4) keeps 4 spare bytes for the
			 * zero-length terminator written after the loop.
			 */
			if ((size + 8) > mask) {
				diva_maint_ack_message(0, &old_irql);
				break;
			}
			/*
			  Write entry length
			*/
			pbuf[written++] = (byte) size;
			pbuf[written++] = (byte) (size >> 8);
			pbuf[written++] = 0;
			pbuf[written++] = 0;
			/*
			  Write message
			*/
			memcpy(&pbuf[written], pmsg, size);
			diva_maint_ack_message(1, &old_irql);
			written += size;
			mask -= (size + 4);
		}

		/* Zero-length terminator entry. */
		pbuf[written++] = 0;
		pbuf[written++] = 0;
		pbuf[written++] = 0;
		pbuf[written++] = 0;

		if ((count < written) || diva_os_copy_to_user(NULL, buf, (void *) pbuf, written)) {
			ret = -EFAULT;
		} else {
			ret = written;
		}
		diva_os_free(0, pbuf);
	}
		break;

	default:
		ret = -EINVAL;
	}
	return (ret);
}
/*
* init
*/
/*
 * mntfunc_init - allocate (or adopt) the trace buffer and start MAINT.
 *
 * @buffer_length: in/out, requested size in KByte; clamped to 64..512
 *                 and converted to bytes on return.
 * @buffer:        out, the trace buffer in use.
 * @diva_dbg_mem:  if non-zero, an externally provided buffer address
 *                 that is used instead of allocating one here.
 *
 * When allocating, progressively smaller sizes are tried (down to
 * 64 KByte) until one succeeds.  Returns 1 on success, 0 on failure.
 */
int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
				    unsigned long diva_dbg_mem)
{
	/* Clamp the requested size (KByte) and convert to bytes. */
	if (*buffer_length < 64)
		*buffer_length = 64;
	else if (*buffer_length > 512)
		*buffer_length = 512;
	*buffer_length *= 1024;

	if (diva_dbg_mem) {
		/* The caller supplied the memory. */
		*buffer = (void *) diva_dbg_mem;
	} else {
		/* Allocate ourselves, shrinking by 1 KByte on failure. */
		for (;;) {
			*buffer = diva_os_malloc(0, *buffer_length);
			if (*buffer)
				break;
			*buffer_length -= 1024;
			if (*buffer_length < (64 * 1024))
				break;
		}
		if (!*buffer) {
			DBG_ERR(("init: Can not alloc trace buffer"));
			return (0);
		}
	}

	if (diva_maint_init(*buffer, *buffer_length, (diva_dbg_mem == 0))) {
		if (!diva_dbg_mem)
			diva_os_free(0, *buffer);
		DBG_ERR(("init: maint init failed"));
		return (0);
	}

	if (!connect_didd()) {
		DBG_ERR(("init: failed to connect to DIDD."));
		diva_maint_finit();
		if (!diva_dbg_mem)
			diva_os_free(0, *buffer);
		return (0);
	}
	return (1);
}
/*
* exit
*/
/*
 * mntfunc_finit - tear the MAINT driver down.
 *
 * Stops debug output, waits (up to ~1 second, in 10 ms steps) for the
 * XDI adapters to shut down, detaches from DIDD, frees the trace
 * buffer if we own it and restores the no-op print hook.
 */
void DIVA_EXIT_FUNCTION mntfunc_finit(void)
{
	void *trace_buffer;
	int retries;

	DbgDeregister();

	/* Poll the adapter shutdown at most 100 times, 10 ms apart. */
	for (retries = 100; diva_mnt_shutdown_xdi_adapters() && retries--; )
		diva_os_sleep(10);

	disconnect_didd();

	trace_buffer = diva_maint_finit();
	if (trace_buffer)
		diva_os_free(0, trace_buffer);

	memset(&MAdapter, 0, sizeof(MAdapter));
	dprintf = no_printf;
}
| gpl-2.0 |
0mark/linux-sunxi | drivers/media/video/cx18/cx18-av-firmware.c | 9229 | 7232 | /*
* cx18 ADEC firmware functions
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include "cx18-driver.h"
#include "cx18-io.h"
#include <linux/firmware.h>
#define CX18_AUDIO_ENABLE 0xc72014
#define CX18_AI1_MUX_MASK 0x30
#define CX18_AI1_MUX_I2S1 0x00
#define CX18_AI1_MUX_I2S2 0x10
#define CX18_AI1_MUX_843_I2S 0x20
#define CX18_AI1_MUX_INVALID 0x30
#define FWFILE "v4l-cx23418-dig.fw"
/*
 * cx18_av_verifyfw - read back and verify the just-loaded A/V firmware.
 *
 * Puts the 8051 back into reset/upload mode, lets the download-control
 * register auto-increment through the firmware address space, and
 * compares every byte read back against @fw.  Each readback word is
 * expected to look like 0x0f00_0000 | byte<<16 | addr.
 *
 * Returns 0 if every byte matched, -EIO on the first mismatch.
 */
static int cx18_av_verifyfw(struct cx18 *cx, const struct firmware *fw)
{
	struct v4l2_subdev *sd = &cx->av_state.sd;
	int ret = 0;
	const u8 *data;
	u32 size;
	int addr;
	u32 expected, dl_control;

	/* Ensure we put the 8051 in reset and enable firmware upload mode */
	dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
	do {
		dl_control &= 0x00ffffff;
		dl_control |= 0x0f000000;	/* reset + upload mode bits */
		cx18_av_write4_noretry(cx, CXADEC_DL_CTL, dl_control);
		dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
	} while ((dl_control & 0xff000000) != 0x0f000000);

	/* Read and auto increment until at address 0x0000 */
	while (dl_control & 0x3fff)
		dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);

	data = fw->data;
	size = fw->size;
	for (addr = 0; addr < size; addr++) {
		dl_control &= 0xffff3fff; /* ignore top 2 bits of address */
		expected = 0x0f000000 | ((u32)data[addr] << 16) | addr;
		if (expected != dl_control) {
			CX18_ERR_DEV(sd, "verification of %s firmware load "
				     "failed: expected %#010x got %#010x\n",
				     FWFILE, expected, dl_control);
			ret = -EIO;
			break;
		}
		/* Each read advances the auto-incrementing address. */
		dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
	}
	if (ret == 0)
		CX18_INFO_DEV(sd, "verified load of %s firmware (%d bytes)\n",
			      FWFILE, size);
	return ret;
}
/*
 * cx18_av_loadfw - load and start the cx23418 A/V decoder firmware.
 *
 * Uploads FWFILE byte by byte through the download-control register,
 * retrying individual byte writes (the bus is known to be flaky) and
 * retrying the whole load up to 5 times.  Afterwards the firmware is
 * verified, the 8051 released from reset, and the audio/I2S routing
 * registers brought into their working configuration.
 *
 * Returns 0 on success, -EINVAL if the firmware file is unavailable,
 * -EIO if the load did not succeed after all retries.
 */
int cx18_av_loadfw(struct cx18 *cx)
{
	struct v4l2_subdev *sd = &cx->av_state.sd;
	const struct firmware *fw = NULL;
	u32 size;
	u32 u, v;
	const u8 *ptr;
	int i;
	int retries1 = 0;

	if (request_firmware(&fw, FWFILE, &cx->pci_dev->dev) != 0) {
		CX18_ERR_DEV(sd, "unable to open firmware %s\n", FWFILE);
		return -EINVAL;
	}

	/* The firmware load often has byte errors, so allow for several
	   retries, both at byte level and at the firmware load level. */
	while (retries1 < 5) {
		cx18_av_write4_expect(cx, CXADEC_CHIP_CTRL, 0x00010000,
				      0x00008430, 0xffffffff); /* cx25843 */
		cx18_av_write_expect(cx, CXADEC_STD_DET_CTL, 0xf6, 0xf6, 0xff);

		/* Reset the Mako core, Register is alias of CXADEC_CHIP_CTRL */
		cx18_av_write4_expect(cx, 0x8100, 0x00010000,
				      0x00008430, 0xffffffff); /* cx25843 */

		/* Put the 8051 in reset and enable firmware upload */
		cx18_av_write4_noretry(cx, CXADEC_DL_CTL, 0x0F000000);

		ptr = fw->data;
		size = fw->size;

		for (i = 0; i < size; i++) {
			/* word layout: mode | addr (low 14 bits) | byte<<16 */
			u32 dl_control = 0x0F000000 | i | ((u32)ptr[i] << 16);
			u32 value = 0;
			int retries2;
			int unrec_err = 0;

			/* Retry a single byte until it reads back correctly. */
			for (retries2 = 0; retries2 < CX18_MAX_MMIO_WR_RETRIES;
			     retries2++) {
				cx18_av_write4_noretry(cx, CXADEC_DL_CTL,
						       dl_control);
				udelay(10);
				value = cx18_av_read4(cx, CXADEC_DL_CTL);
				if (value == dl_control)
					break;
				/* Check if we can correct the byte by changing
				   the address.  We can only write the lower
				   address byte of the address. */
				if ((value & 0x3F00) != (dl_control & 0x3F00)) {
					unrec_err = 1;
					break;
				}
			}
			if (unrec_err || retries2 >= CX18_MAX_MMIO_WR_RETRIES)
				break;	/* restart the whole load */
		}
		if (i == size)
			break;	/* all bytes written: load done */
		retries1++;
	}
	if (retries1 >= 5) {
		CX18_ERR_DEV(sd, "unable to load firmware %s\n", FWFILE);
		release_firmware(fw);
		return -EIO;
	}

	/* Firmware upload done: switch download control to run mode. */
	cx18_av_write4_expect(cx, CXADEC_DL_CTL,
			      0x03000000 | fw->size, 0x03000000, 0x13000000);

	CX18_INFO_DEV(sd, "loaded %s firmware (%d bytes)\n", FWFILE, size);

	/* Only release the 8051 (bit 0x10) if the image verified OK. */
	if (cx18_av_verifyfw(cx, fw) == 0)
		cx18_av_write4_expect(cx, CXADEC_DL_CTL,
				      0x13000000 | fw->size, 0x13000000, 0x13000000);

	/* Output to the 416 */
	cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x78000);

	/* Audio input control 1 set to Sony mode */
	/* Audio output input 2 is 0 for slave operation input */
	/* 0xC4000914[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */
	/* 0xC4000914[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge
	   after WS transition for first bit of audio word. */
	cx18_av_write4(cx, CXADEC_I2S_IN_CTL, 0x000000A0);

	/* Audio output control 1 is set to Sony mode */
	/* Audio output control 2 is set to 1 for master mode */
	/* 0xC4000918[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */
	/* 0xC4000918[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge
	   after WS transition for first bit of audio word. */
	/* 0xC4000918[8]: 0 = slave operation, 1 = master (SCK_OUT and WS_OUT
	   are generated) */
	cx18_av_write4(cx, CXADEC_I2S_OUT_CTL, 0x000001A0);

	/* set alt I2s master clock to /0x16 and enable alt divider i2s
	   passthrough */
	cx18_av_write4(cx, CXADEC_PIN_CFG3, 0x5600B687);

	cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, 0x000000F6, 0x000000F6,
			      0x3F00FFFF);
	/* CxDevWrReg(CXADEC_STD_DET_CTL, 0x000000FF); */

	/* Set bit 0 in register 0x9CC to signify that this is MiniMe. */
	/* Register 0x09CC is defined by the Merlin firmware, and doesn't
	   have a name in the spec. */
	cx18_av_write4(cx, 0x09CC, 1);

	v = cx18_read_reg(cx, CX18_AUDIO_ENABLE);
	/* If bit 11 is 1, clear bit 10 */
	if (v & 0x800)
		cx18_write_reg_expect(cx, v & 0xFFFFFBFF, CX18_AUDIO_ENABLE,
				      0, 0x400);

	/* Toggle the AI1 MUX */
	v = cx18_read_reg(cx, CX18_AUDIO_ENABLE);
	u = v & CX18_AI1_MUX_MASK;	/* remember the current source */
	v &= ~CX18_AI1_MUX_MASK;
	if (u == CX18_AI1_MUX_843_I2S || u == CX18_AI1_MUX_INVALID) {
		/* Switch to I2S1 */
		v |= CX18_AI1_MUX_I2S1;
		cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
				      v, CX18_AI1_MUX_MASK);
		/* Switch back to the A/V decoder core I2S output */
		v = (v & ~CX18_AI1_MUX_MASK) | CX18_AI1_MUX_843_I2S;
	} else {
		/* Switch to the A/V decoder core I2S output */
		v |= CX18_AI1_MUX_843_I2S;
		cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
				      v, CX18_AI1_MUX_MASK);
		/* Switch back to I2S1 or I2S2 */
		v = (v & ~CX18_AI1_MUX_MASK) | u;
	}
	cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
			      v, CX18_AI1_MUX_MASK);

	/* Enable WW auto audio standard detection */
	v = cx18_av_read4(cx, CXADEC_STD_DET_CTL);
	v |= 0xFF;	/* Auto by default */
	v |= 0x400;	/* Stereo by default */
	v |= 0x14000000;
	cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, v, v, 0x3F00FFFF);

	release_firmware(fw);
	return 0;
}
| gpl-2.0 |
crpalmer/dna-kernel-plus-upstream | drivers/isdn/hardware/eicon/debuglib.c | 9741 | 4506 |
/*
*
Copyright (c) Eicon Networks, 2002.
*
This source file is supplied for the use with
Eicon Networks range of DIVA Server Adapters.
*
Eicon File Revision : 2.1
*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
*
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
*
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "debuglib.h"
#ifdef DIVA_NO_DEBUGLIB
static DIVA_DI_PRINTF dprintf;
#else /* DIVA_NO_DEBUGLIB */
/* The single, file-global debug handle of this driver. */
_DbgHandle_ myDriverDebugHandle = { 0 /*!Registered*/, DBG_HANDLE_VERSION };
/* Print hook: points at the maint driver once registered, else no_printf. */
DIVA_DI_PRINTF dprintf = no_printf;
/*****************************************************************************/
/*
 * DBG_FUNC(name) expands to the varargs wrapper myDbgPrint_<name>() which
 * forwards a formatted message of class DLI_<name> to the registered
 * debug driver's dbg_prt callback (no-op while unregistered).
 */
#define DBG_FUNC(name) \
	void \
	myDbgPrint_##name(char *format, ...) \
	{ va_list ap; \
		if (myDriverDebugHandle.dbg_prt) \
		{ va_start(ap, format); \
			(myDriverDebugHandle.dbg_prt) \
				(myDriverDebugHandle.id, DLI_##name, format, ap); \
			va_end(ap); \
		} }
/* Basic message classes. */
DBG_FUNC(LOG)
DBG_FUNC(FTL)
DBG_FUNC(ERR)
DBG_FUNC(TRC)
DBG_FUNC(MXLOG)
DBG_FUNC(FTL_MXLOG)
/*
 * myDbgPrint_EVL - event-log variant; takes a message ID instead of a
 * format string and goes through the separate dbg_ev callback.
 */
void
myDbgPrint_EVL(long msgID, ...)
{ va_list ap;
	if (myDriverDebugHandle.dbg_ev)
	{ va_start(ap, msgID);
		(myDriverDebugHandle.dbg_ev)
			(myDriverDebugHandle.id, (unsigned long)msgID, ap);
		va_end(ap);
	} }
/* Subsystem-specific message classes. */
DBG_FUNC(REG)
DBG_FUNC(MEM)
DBG_FUNC(SPL)
DBG_FUNC(IRP)
DBG_FUNC(TIM)
DBG_FUNC(BLK)
DBG_FUNC(TAPI)
DBG_FUNC(NDIS)
DBG_FUNC(CONN)
DBG_FUNC(STAT)
DBG_FUNC(SEND)
DBG_FUNC(RECV)
DBG_FUNC(PRV0)
DBG_FUNC(PRV1)
DBG_FUNC(PRV2)
DBG_FUNC(PRV3)
/*****************************************************************************/
/*
 * DbgRegister - register this driver with the maint (debug) driver.
 *
 * @drvName: driver name copied into the handle (truncated to fit).
 * @drvTag:  version/tag string copied into the handle (truncated).
 * @dbgMask: requested debug mask; DL_EVL|DL_FTL|DL_LOG are always on.
 *
 * Registration happens as a side effect of the magic
 * dprintf("\000\377", &handle) call; on success the maint driver fills
 * in the handle's callback pointers.  Returns 1 on success, 0 if no
 * (compatible) maint driver answered.
 */
int
DbgRegister(char *drvName, char *drvTag, unsigned long dbgMask)
{
	int len;
	/*
	 * deregister (if already registered) and zero out myDriverDebugHandle
	 */
	DbgDeregister();
	/*
	 * initialize the debug handle
	 */
	myDriverDebugHandle.Version = DBG_HANDLE_VERSION;
	myDriverDebugHandle.id = -1;
	myDriverDebugHandle.dbgMask = dbgMask | (DL_EVL | DL_FTL | DL_LOG);
	/*
	 * The handle was just memset to zero, so copying at most size-1
	 * bytes leaves the strings NUL-terminated.
	 */
	len = strlen(drvName);
	memcpy(myDriverDebugHandle.drvName, drvName,
	       (len < sizeof(myDriverDebugHandle.drvName)) ?
	       len : sizeof(myDriverDebugHandle.drvName) - 1);
	len = strlen(drvTag);
	memcpy(myDriverDebugHandle.drvTag, drvTag,
	       (len < sizeof(myDriverDebugHandle.drvTag)) ?
	       len : sizeof(myDriverDebugHandle.drvTag) - 1);
	/*
	 * Try to register debugging via old (and only) interface
	 */
	dprintf("\000\377", &myDriverDebugHandle);
	if (myDriverDebugHandle.dbg_prt)
	{
		return (1);
	}
	/*
	 * Check if we registered with an old maint driver (see debuglib.h)
	 */
	if (myDriverDebugHandle.dbg_end != NULL
	    /* location of 'dbg_prt' in _OldDbgHandle_ struct */
	    && (myDriverDebugHandle.regTime.LowPart ||
		myDriverDebugHandle.regTime.HighPart))
		/* same location as in _OldDbgHandle_ struct */
	{
		dprintf("%s: Cannot log to old maint driver !", drvName);
		/* Fetch dbg_end from its _OldDbgHandle_ location so the
		 * deregistration below calls the right function. */
		myDriverDebugHandle.dbg_end =
			((_OldDbgHandle_ *)&myDriverDebugHandle)->dbg_end;
		DbgDeregister();
	}
	return (0);
}
/*****************************************************************************/
/*
 * DbgSetLevel - change the active debug mask.
 *
 * The event-log, fatal and log classes (DL_EVL|DL_FTL|DL_LOG) are
 * always kept enabled regardless of the caller's mask.
 */
void
DbgSetLevel(unsigned long dbgMask)
{
	myDriverDebugHandle.dbgMask = dbgMask | (DL_EVL | DL_FTL | DL_LOG);
}
/*****************************************************************************/
/*
 * DbgDeregister - detach from the maint driver (if attached) and reset
 * the debug handle to its pristine all-zero state.  Safe to call when
 * not registered.
 */
void
DbgDeregister(void)
{
	if (myDriverDebugHandle.dbg_end)
		(myDriverDebugHandle.dbg_end)(&myDriverDebugHandle);

	memset(&myDriverDebugHandle, 0, sizeof(myDriverDebugHandle));
}
/*
 * xdi_dbg_xlog - emit an XLOG/trace record to the maint driver.
 *
 * Only forwarded when DL_STAT is enabled and one of the IRQ-safe
 * (dbg_irq) or legacy (dbg_old) callbacks is available; dbg_irq is
 * preferred.  An empty format string selects the raw DLI_XLOG class,
 * anything else is sent as DLI_TRC.
 */
void xdi_dbg_xlog(char *x, ...) {
	va_list ap;
	va_start(ap, x);
	if (myDriverDebugHandle.dbg_end &&
	    (myDriverDebugHandle.dbg_irq || myDriverDebugHandle.dbg_old) &&
	    (myDriverDebugHandle.dbgMask & DL_STAT)) {
		if (myDriverDebugHandle.dbg_irq) {
			(*(myDriverDebugHandle.dbg_irq))(myDriverDebugHandle.id,
							 (x[0] != 0) ? DLI_TRC : DLI_XLOG, x, ap);
		} else {
			(*(myDriverDebugHandle.dbg_old))(myDriverDebugHandle.id, x, ap);
		}
	}
	va_end(ap);
}
/*****************************************************************************/
#endif /* DIVA_NO_DEBUGLIB */
| gpl-2.0 |
mikel0pez/mlkernel_d838 | arch/powerpc/oprofile/backtrace.c | 10509 | 2953 | /**
* Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
**/
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/compat.h>
#define STACK_SP(STACK) *(STACK)
#define STACK_LR64(STACK) *((unsigned long *)(STACK) + 2)
#define STACK_LR32(STACK) *((unsigned int *)(STACK) + 1)
#ifdef CONFIG_PPC64
#define STACK_LR(STACK) STACK_LR64(STACK)
#else
#define STACK_LR(STACK) STACK_LR32(STACK)
#endif
static unsigned int user_getsp32(unsigned int sp, int is_first)
{
unsigned int stack_frame[2];
void __user *p = compat_ptr(sp);
if (!access_ok(VERIFY_READ, p, sizeof(stack_frame)))
return 0;
/*
* The most likely reason for this is that we returned -EFAULT,
* which means that we've done all that we can do from
* interrupt context.
*/
if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR32(stack_frame));
/*
* We do not enforce increasing stack addresses here because
* we may transition to a different stack, eg a signal handler.
*/
return STACK_SP(stack_frame);
}
#ifdef CONFIG_PPC64
static unsigned long user_getsp64(unsigned long sp, int is_first)
{
unsigned long stack_frame[3];
if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame)))
return 0;
if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
sizeof(stack_frame)))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR64(stack_frame));
return STACK_SP(stack_frame);
}
#endif
/*
 * kernel_getsp - walk one kernel stack frame.
 *
 * Validates @sp for the current task, records the saved LR with
 * oprofile unless this is the topmost frame, and returns the back
 * chain (0 when the frame is invalid).
 */
static unsigned long kernel_getsp(unsigned long sp, int is_first)
{
	unsigned long *frame = (unsigned long *)sp;

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR(frame));

	/*
	 * Increasing stack addresses are not enforced here: we might be
	 * crossing from an interrupt stack to the kernel stack, a case
	 * validate_sp() is designed to understand, so just rely on it.
	 */
	return STACK_SP(frame);
}
/*
 * op_powerpc_backtrace - record up to @depth stack frames with oprofile.
 *
 * Chooses the kernel, 64-bit user, or 32-bit user frame walker based on
 * the trapped context, and follows the back chain until it ends or the
 * requested depth is reached.
 */
void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long stack_ptr = regs->gpr[1];
	int at_top = 1;

	/* The topmost stackframe is discarded, so walk one extra level. */
	depth += 1;

	if (!user_mode(regs)) {
		while (depth--) {
			stack_ptr = kernel_getsp(stack_ptr, at_top);
			if (!stack_ptr)
				return;
			at_top = 0;
		}
		return;
	}

#ifdef CONFIG_PPC64
	if (!is_32bit_task()) {
		while (depth--) {
			stack_ptr = user_getsp64(stack_ptr, at_top);
			if (!stack_ptr)
				return;
			at_top = 0;
		}
		return;
	}
#endif

	while (depth--) {
		stack_ptr = user_getsp32(stack_ptr, at_top);
		if (!stack_ptr)
			return;
		at_top = 0;
	}
}
| gpl-2.0 |
idl3r/trinity | syscalls/write.c | 14 | 2293 | /*
* SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, size_t, count)
*/
#include <stdlib.h>
#include "arch.h" // page_size
#include "maps.h"
#include "random.h"
#include "sanitise.h"
#include "shm.h"
#include "syscall.h"
#include "trinity.h"
#include "utils.h"
/*
 * sanitise_write - fill in the buf/count arguments for write(2).
 *
 * Half the time a single byte is written, otherwise up to a page of
 * random data.  On allocation failure the record is left untouched.
 */
static void sanitise_write(struct syscallrecord *rec)
{
	unsigned int len;
	void *data;

	len = RAND_BOOL() ? 1 : (unsigned int)(rand() % page_size);

	data = malloc(len);
	if (data == NULL)
		return;

	generate_rand_bytes(data, len);

	rec->a2 = (unsigned long) data;
	rec->a3 = len;
}
/*
 * post_write - release the buffer allocated by sanitise_write() and
 * NULL out the argument slot (freeptr also clears the pointer).
 */
static void post_write(struct syscallrecord *rec)
{
	freeptr(&rec->a2);
}
/*
 * Fuzzer descriptor for write(2).
 * buf/count are generated together by sanitise_write(); post_write()
 * frees the buffer after the call.
 */
struct syscallentry syscall_write = {
	.name = "write",
	.num_args = 3,
	.arg1name = "fd",
	.arg1type = ARG_FD,
	.arg2name = "buf",
	.arg2type = ARG_NON_NULL_ADDRESS,
	.arg3name = "count",
	.arg3type = ARG_LEN,
	.flags = NEED_ALARM,	/* the call may block; arm the watchdog alarm */
	.sanitise = sanitise_write,
	.post = post_write,
};
/*
* SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen)
*/
/*
 * Fuzzer descriptor for writev(2); vec/vlen are generated by the
 * generic ARG_IOVEC/ARG_IOVECLEN machinery, so no sanitise hook needed.
 */
struct syscallentry syscall_writev = {
	.name = "writev",
	.num_args = 3,
	.arg1name = "fd",
	.arg1type = ARG_FD,
	.arg2name = "vec",
	.arg2type = ARG_IOVEC,
	.arg3name = "vlen",
	.arg3type = ARG_IOVECLEN,
	.flags = NEED_ALARM,	/* the call may block; arm the watchdog alarm */
};
/*
* SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf, size_t count, loff_t po>
*/
/*
 * sanitise_pwrite64 - build the arguments for pwrite64(2).
 *
 * Reuses sanitise_write() for the buf/count pair, then makes sure the
 * file offset in a4 is non-negative when interpreted as the kernel's
 * signed loff_t.
 *
 * Fix: the old code tested `(int) rec->a4 < 0`, which only looks at
 * the low 32 bits of the offset — it both rejected perfectly valid
 * offsets (e.g. 0x????????_8???????) and let genuinely negative 64-bit
 * offsets through.  Testing via (long) checks the full register width.
 */
static void sanitise_pwrite64(struct syscallrecord *rec)
{
	sanitise_write(rec);

	/* Re-roll until the offset is non-negative as a signed value. */
	while ((long) rec->a4 < 0)
		rec->a4 = rand64();
}
/*
 * Fuzzer descriptor for pwrite64(2).
 * buf/count come from sanitise_write() (via sanitise_pwrite64, which
 * also picks a non-negative offset); post_write() frees the buffer.
 */
struct syscallentry syscall_pwrite64 = {
	.name = "pwrite64",
	.num_args = 4,
	.arg1name = "fd",
	.arg1type = ARG_FD,
	.arg2name = "buf",
	.arg2type = ARG_ADDRESS,
	.arg3name = "count",
	.arg3type = ARG_LEN,
	.arg4name = "pos",	/* randomised in sanitise_pwrite64() */
	.flags = NEED_ALARM,
	.sanitise = sanitise_pwrite64,
	.post = post_write,
};
/*
* SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
*/
/*
 * Fuzzer descriptor for pwritev(2); the 64-bit position is passed as a
 * low/high pair of longs, both left fully random.
 */
struct syscallentry syscall_pwritev = {
	.name = "pwritev",
	.num_args = 5,
	.arg1name = "fd",
	.arg1type = ARG_FD,
	.arg2name = "vec",
	.arg2type = ARG_IOVEC,
	.arg3name = "vlen",
	.arg3type = ARG_IOVECLEN,
	.arg4name = "pos_l",
	.arg5name = "pos_h",
	.flags = NEED_ALARM,	/* the call may block; arm the watchdog alarm */
};
| gpl-2.0 |
houzhenggang/MaRa-a1a0a5aNaL | package/ezp-httpd/src/vs.c | 14 | 6074 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ezp.h"
#include "common.h"
#include "ezp-lib.h"
/* Indices into vs_variables[] for the per-field validators below. */
enum {
	VS_ENABLE = 0,		/* global virtual-server on/off */
	VS_RULE_ENABLE,		/* per-rule on/off */
	VS_RULE_NAME,		/* free-form rule name (max 20 chars) */
	VS_RULE_EXTIF,		/* external interface (only "wan") */
	VS_RULE_INDEX,		/* rule position */
	VS_RULE_ALIAS_ADDR,	/* WAN-side alias IP */
	VS_RULE_DEST_ADDR	/* LAN-side destination IP */
};

/*
 * Validation metadata for each form field; argv lists the allowed
 * choices (or a length limit for names), nullok says whether the field
 * may be left empty.
 */
struct variable vs_variables[] = {
	{longname: "Virtual Server Enable", argv:ARGV("0", "1"),
	 nullok: FALSE},
	{longname: "Virtual Server Rule Enable", argv:ARGV("0", "1"),
	 nullok: FALSE},
	{longname: "Virtual Server Rule Name", argv:ARGV("20"),
	 nullok: TRUE},
	{longname: "Virtual Server Rule External IF", argv:ARGV("wan"),
	 nullok: FALSE},
	{longname: "Virtual Server Index", argv:ARGV(""),
	 nullok: FALSE},
	{longname: "Virtual Server Alias IP Address", argv:ARGV(""),
	 nullok: FALSE},
	{longname: "Virtual Server Destination IP Address", argv:ARGV(""),
	 nullok: FALSE},
};
/* name^enable[0|1]^extif^index^wan_ipaddr^mapped_ipaddr */
/* name^enable[0|1]^extif^index^wan_ipaddr^mapped_ipaddr */
/*
 * valid_vs - validate the virtual-server form fields posted by the web UI.
 *
 * When the global enable is off nothing else is checked.  Otherwise
 * every submitted rule (vs_rule<N>_*) is validated field by field
 * against vs_variables[].  Returns TRUE when everything is acceptable,
 * FALSE on the first invalid field.
 */
int
valid_vs(webs_t wp, char *value, struct variable *v)
{
	int i, nrule;
	char tmp[TMP_LEN];
	char *enable, *name, *extif, *ip, *rule_num;

	/* Enable of the whole forwarding mechanism. */
	snprintf(tmp, sizeof(tmp), "vs_enable");
	enable = websGetVar(wp, tmp, "");
	if (valid_choice(wp, enable, &vs_variables[VS_ENABLE]) == FALSE) {
		return FALSE;
	}
	if (*enable == '0') {
		return TRUE;	/* disabled: rules need no validation */
	}

	/* NOTE(review): leftover debug print — consider removing. */
	printf("verify\n");

	snprintf(tmp, sizeof(tmp), "vs_rule_num");
	rule_num = websGetVar(wp, tmp, "0");
	nrule = atoi(rule_num);

	for (i = 0; i < nrule; i++) {
		/* Get variables. */
		/* Enable */
		snprintf(tmp, sizeof(tmp), "vs_rule%d_enable", i);
		enable = websGetVar(wp, tmp, "");
		if (valid_choice(wp, enable, &vs_variables[VS_RULE_ENABLE])
		    == FALSE) {
			return FALSE;
		}
		/* Name */
		snprintf(tmp, sizeof(tmp), "vs_rule%d_name", i);
		name = websGetVar(wp, tmp, "");
		if (valid_name(wp, name, &vs_variables[VS_RULE_NAME]) == FALSE) {
			return FALSE;
		}
		/* External IF */
		snprintf(tmp, sizeof(tmp), "vs_rule%d_extif", i);
		extif = websGetVar(wp, tmp, "");
		if (valid_if(wp, extif, &vs_variables[VS_RULE_EXTIF]) == FALSE) {
			return FALSE;
		}
		/* Alias IP */
		snprintf(tmp, sizeof(tmp), "vs_rule%d_wan_ipaddr", i);
		ip = websGetVar(wp, tmp, "");
		if (valid_ipaddr(wp, ip, &vs_variables[VS_RULE_ALIAS_ADDR]) == FALSE) {
			return FALSE;
		}
		/* Destination IP */
		snprintf(tmp, sizeof(tmp), "vs_rule%d_mapped_ipaddr", i);
		ip = websGetVar(wp, tmp, "");
		if (valid_ipaddr(wp, ip, &vs_variables[VS_RULE_DEST_ADDR]) == FALSE) {
			return FALSE;
		}
	}
	return TRUE;
}
/*
 * save_vs - persist the posted virtual-server settings into NVRAM.
 *
 * Compares each submitted value with what is currently stored; only
 * when something differs is config_preaction() invoked (once, guarded
 * by @map) and the NVRAM value updated.  Returns 1 when anything
 * changed (after running config_postaction()), 0 otherwise.
 *
 * Fix: the accumulated rule list was previously built with
 *   snprintf(all_rule, sizeof(all_rule), "%s|%s", all_rule, rule);
 * i.e. the destination buffer was also passed as a source argument of
 * the same snprintf call, which is undefined behavior (C99 7.19.6.5:
 * "If copying takes place between objects that overlap, the behavior
 * is undefined").  The list is now appended via an explicit offset.
 */
int
save_vs(webs_t wp, char *value, struct variable *v, struct service *s)
{
	char *nv_enable = "vs_enable";
	char *nv_rule_num = "vs_rule_num";
	char *nv_rule_max = "vs_rule_max";
	char *nv_rule = "vs_rule";
	int i, nrule, change = 0, max;
	int64_t map = 0;
	char rule[LONG_BUF_LEN], old_rule[LONG_BUF_LEN];
	char all_rule[LONG_BUF_LEN];
	char tmp[TMP_LEN];
	char *enable, *name, *extif, *wan_ipaddr, *mapped_ipaddr;

	/* Enable virtual server. */
	snprintf(tmp, sizeof(tmp), "vs_enable");
	enable = websGetVar(wp, tmp, "");
	if (strcmp(enable, nvram_safe_get(nv_enable))) {
		config_preaction(&map, v, s, "NUM=0", "");
		nvram_set(nv_enable, enable);
		change = 1;
	}

	if (*enable == '1') {
		/* Get rule number, clamped to the configured maximum. */
		snprintf(tmp, sizeof(tmp), "vs_rule_num");
		nrule = atoi(websGetVar(wp, tmp, "0"));
		max = atoi(nvram_safe_get(nv_rule_max));
		nrule = (nrule >= max) ? max : nrule;
		if (nrule != atoi(nvram_safe_get(nv_rule_num))) {
			/* In case the situation that the last rule is deleted but others are
			 * kept the same and the check flow will treat it as 'nothing
			 * changed'.
			 */
			snprintf(tmp, sizeof(tmp), "%d", nrule);
			config_preaction(&map, v, s, "NUM=0", "");
			nvram_set(nv_rule_num, tmp);
			change = 1;
		}

		all_rule[0] = '\0';
		for (i = 0; i < nrule; i++) {
			/* Enable */
			snprintf(tmp, sizeof(tmp), "vs_rule%d_enable", i);
			enable = websGetVar(wp, tmp, "0");
			/* Name */
			snprintf(tmp, sizeof(tmp), "vs_rule%d_name", i);
			name = websGetVar(wp, tmp, "");
			/* External IF */
			snprintf(tmp, sizeof(tmp), "vs_rule%d_extif", i);
			extif = websGetVar(wp, tmp, "");
			/* Alias Address */
			snprintf(tmp, sizeof(tmp), "vs_rule%d_wan_ipaddr", i);
			wan_ipaddr = websGetVar(wp, tmp, "");
			/* Destination Address */
			snprintf(tmp, sizeof(tmp), "vs_rule%d_mapped_ipaddr", i);
			mapped_ipaddr = websGetVar(wp, tmp, "");

			snprintf(rule, sizeof(rule), "%s^%s^%s^%d^%s^%s",
				 name, enable, extif, i, wan_ipaddr, mapped_ipaddr);
			printf("%d:%s\n", i, rule);	/* NOTE(review): debug leftover */

			if (ezplib_get_rule(nv_rule, i, old_rule, TMP_LEN) < 0) {
				/* i-th rule doesn't exist */
				config_preaction(&map, v, s, "NUM=0", "");
				ezplib_append_rule(nv_rule, rule);
				change = 1;
			} else {
				if (strcmp(rule, old_rule)) {
					config_preaction(&map, v, s, "NUM=0", "");
					ezplib_replace_rule(nv_rule, i, rule);
					change = 1;
				}
			}

			/*
			 * Append to the accumulated '|'-separated list through
			 * an offset: never pass all_rule as both destination
			 * and source of one snprintf call (undefined behavior).
			 */
			if (i == 0) {
				snprintf(all_rule, sizeof(all_rule), "%s", rule);
			} else {
				size_t off = strlen(all_rule);
				snprintf(all_rule + off, sizeof(all_rule) - off,
					 "|%s", rule);
			}
		}
		nvram_set(nv_rule, all_rule);
	}

	if (change) {
		config_postaction(map, s, "NUM=0", "");
	}
	return change;
}
| gpl-2.0 |
princeofdarkness76/gcc-4.2 | gcc/config/alpha/vms-crt0-64.c | 14 | 2663 | /* VMS 64bit crt0 returning VMS style condition codes .
Copyright (C) 2001 Free Software Foundation, Inc.
Contributed by Douglas B. Rupp (rupp@gnat.com).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#if !defined(__DECC)
You Lose! This file can only be compiled with DEC C.
#else
/* This file can only be compiled with DEC C, due to the call to
lib$establish and the pragmas pointer_size. */
#pragma __pointer_size short
#include <stdlib.h>
#include <string.h>
#include <ssdef.h>
extern void decc$main ();
extern int main ();
/*
 * handler - VMS condition handler installed via lib$establish.
 * Declines to handle any condition (SS$_RESIGNAL) so signals propagate
 * to the next handler in the chain.
 */
static int
handler (sigargs, mechargs)
     void *sigargs;
     void *mechargs;
{
  return SS$_RESIGNAL;
}
/*
 * __main - 64-bit-pointer startup shim for VMS.
 *
 * Runs the DEC C runtime startup (decc$main) under the default 32-bit
 * pointer size, then rebuilds argv and envp as arrays of 64-bit
 * pointers (strdup'ing every string under "#pragma __pointer_size
 * long") before calling the real main().
 *
 * Fix: long_envp used to be allocated with a fixed five slots, which
 * overflowed the buffer whenever the process had more than four
 * environment variables.  The environment is now counted first and the
 * array sized accordingly, mirroring the argv handling above it.
 */
int
__main (arg1, arg2, arg3, image_file_desc, arg5, arg6)
     void *arg1, *arg2, *arg3;
     void *image_file_desc;
     void *arg5, *arg6;
{
  int argc;
  char **argv;
  char **envp;

#pragma __pointer_size long

  int i;
  int envc;
  char **long_argv;
  char **long_envp;

#pragma __pointer_size short

  lib$establish (handler);
  decc$main (arg1, arg2, arg3, image_file_desc,
	     arg5, arg6, &argc, &argv, &envp);

#pragma __pointer_size long

  /* Reallocate argv with 64 bit pointers. */
  long_argv = (char **) malloc (sizeof (char *) * (argc + 1));
  for (i = 0; i < argc; i++)
    long_argv[i] = strdup (argv[i]);
  long_argv[argc] = (char *) 0;

  /* Count the environment entries, then reallocate envp with 64 bit
     pointers and a terminating null entry.  */
  for (envc = 0; envp[envc]; envc++)
    ;
  long_envp = (char **) malloc (sizeof (char *) * (envc + 1));
  for (i = 0; i < envc; i++)
    long_envp[i] = strdup (envp[i]);
  long_envp[envc] = (char *) 0;

#pragma __pointer_size short

  return main (argc, long_argv, long_envp);
}
#endif
| gpl-2.0 |
galaxyishere/samsung-kernel-latona | drivers/mmc/core/sd.c | 14 | 17372 | /*
* linux/drivers/mmc/core/sd.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
* Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include "core.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
#define _MMC_SAFE_ACCESS_
#ifdef _MMC_SAFE_ACCESS_
extern int mmc_is_available;
#endif
/* Transfer-rate lookup tables: max_dtr = tran_exp[e] * tran_mant[m]. */
static const unsigned int tran_exp[] = {
	10000,		100000,		1000000,	10000000,
	0,		0,		0,		0
};

static const unsigned char tran_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

/* Access-time lookup tables: tacc_ns from tacc_exp[e] and tacc_mant[m]. */
static const unsigned int tacc_exp[] = {
	1,	10,	100,	1000,	10000,	100000,	1000000, 10000000,
};

static const unsigned int tacc_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

/*
 * UNSTUFF_BITS - extract a bit field from a 128-bit big-endian response.
 * @resp:  array of four u32 words, resp[0] holding the most significant
 *         bits of the register.
 * @start: bit offset (0 = least significant bit of resp[3]).
 * @size:  field width in bits (1..32).
 */
#define UNSTUFF_BITS(resp,start,size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})
/*
 * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */
static void mmc_decode_cid(struct mmc_card *card)
{
	u32 *resp = card->raw_cid;

	memset(&card->cid, 0, sizeof(struct mmc_cid));

	/*
	 * SD doesn't currently have a version field so we will
	 * have to assume we can parse this.
	 */
	card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);		/* MID */
	card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);		/* OID */
	card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);	/* PNM, 5 chars */
	card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
	card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
	card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
	card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
	card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);		/* PRV high */
	card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);		/* PRV low */
	card->cid.serial = UNSTUFF_BITS(resp, 24, 32);		/* PSN */
	card->cid.year = UNSTUFF_BITS(resp, 12, 8);		/* MDT year */
	card->cid.month = UNSTUFF_BITS(resp, 8, 4);		/* MDT month */

	card->cid.year += 2000; /* SD cards year offset */
}
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, csd_struct;
	u32 *resp = card->raw_csd;

	csd_struct = UNSTUFF_BITS(resp, 126, 2);

	switch (csd_struct) {
	case 0:
		/* CSD version 1.0: byte-addressed standard-capacity card. */
		m = UNSTUFF_BITS(resp, 115, 4);
		e = UNSTUFF_BITS(resp, 112, 3);
		csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
		csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

		m = UNSTUFF_BITS(resp, 99, 4);
		e = UNSTUFF_BITS(resp, 96, 3);
		csd->max_dtr	  = tran_exp[e] * tran_mant[m];
		csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

		/* capacity = (C_SIZE + 1) << (C_SIZE_MULT + 2), in blocks */
		e = UNSTUFF_BITS(resp, 47, 3);
		m = UNSTUFF_BITS(resp, 62, 12);
		csd->capacity	  = (1 + m) << (e + 2);

		csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
		csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
		csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
		csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
		csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
		csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
		csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
		break;
	case 1:
		/*
		 * This is a block-addressed SDHC card. Most
		 * interesting fields are unused and have fixed
		 * values. To avoid getting tripped by buggy cards,
		 * we assume those fixed values ourselves.
		 */
		mmc_card_set_blockaddr(card);

		csd->tacc_ns	 = 0; /* Unused */
		csd->tacc_clks	 = 0; /* Unused */

		m = UNSTUFF_BITS(resp, 99, 4);
		e = UNSTUFF_BITS(resp, 96, 3);
		csd->max_dtr	  = tran_exp[e] * tran_mant[m];
		csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

		/* capacity = (C_SIZE + 1) * 512 KByte, in 512-byte blocks */
		m = UNSTUFF_BITS(resp, 48, 22);
		csd->capacity     = (1 + m) << 10;

		csd->read_blkbits = 9;
		csd->read_partial = 0;
		csd->write_misalign = 0;
		csd->read_misalign = 0;
		csd->r2w_factor = 4; /* Unused */
		csd->write_blkbits = 9;
		csd->write_partial = 0;
		break;
	default:
		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
		       mmc_hostname(card->host), csd_struct);
		return -EINVAL;
	}

	return 0;
}
/*
 * mmc_decode_scr - decode the card's raw 64-bit SCR register.
 *
 * The raw SCR words are placed into the low half of a 4-word response
 * array so UNSTUFF_BITS() can address them with SCR bit offsets.
 * Returns 0 on success, -EINVAL for an unknown SCR structure version.
 */
static int mmc_decode_scr(struct mmc_card *card)
{
	struct sd_scr *scr = &card->scr;
	u32 resp[4];
	unsigned int version;

	resp[3] = card->raw_scr[1];
	resp[2] = card->raw_scr[0];

	version = UNSTUFF_BITS(resp, 60, 4);
	if (version != 0) {
		printk(KERN_ERR "%s: unrecognised SCR structure version %d\n",
		       mmc_hostname(card->host), version);
		return -EINVAL;
	}

	scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
	scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);

	return 0;
}
/*
 * Fetches and decodes switch information
 *
 * Queries the SD switch function status (CMD6, mode 0 "check") to
 * discover optional card capabilities; currently only the high-speed
 * support bit is recorded in card->sw_caps. Returns 0 on success or
 * when the card simply lacks the feature, a negative error code on
 * hard failures.
 */
static int mmc_read_switch(struct mmc_card *card)
{
    int err;
    u8 *status;

    /* The switch function only exists from SD spec 1.10 onwards. */
    if (card->scr.sda_vsn < SCR_SPEC_VER_1)
        return 0;

    if (!(card->csd.cmdclass & CCC_SWITCH)) {
        printk(KERN_WARNING "%s: card lacks mandatory switch "
            "function, performance might suffer.\n",
            mmc_hostname(card->host));
        return 0;
    }

    /* The CMD6 status block is always 512 bits (64 bytes). */
    status = kmalloc(64, GFP_KERNEL);
    if (!status) {
        printk(KERN_ERR "%s: could not allocate a buffer for "
            "switch capabilities.\n", mmc_hostname(card->host));
        return -ENOMEM;
    }

    /* Mode 0 (check), group 1, function 1: query only, no switch. */
    err = mmc_sd_switch(card, 0, 0, 1, status);
    if (err) {
        /* If the host or the card can't do the switch,
         * fail more gracefully. */
        if ((err != -EINVAL)
         && (err != -ENOSYS)
         && (err != -EFAULT))
            goto out;

        printk(KERN_WARNING "%s: problem reading switch "
            "capabilities, performance might suffer.\n",
            mmc_hostname(card->host));
        err = 0;

        goto out;
    }

    /* Status byte 13 bit 1: group 1 function 1 (high-speed) supported. */
    if (status[13] & 0x02)
        card->sw_caps.hs_max_dtr = 50000000;

out:
    kfree(status);

    return err;
}
/*
 * Test if the card supports high-speed mode and, if so, switch to it.
 *
 * Issues CMD6 in "set" mode (group 1, function 1). On success the
 * card is flagged high-speed and the host timing is updated; a card
 * that refuses the switch merely logs a warning. Returns 0 on
 * success or if high-speed is not applicable, a negative error code
 * on failures.
 */
static int mmc_switch_hs(struct mmc_card *card)
{
    int err;
    u8 *status;

    /* High-speed needs spec >= 1.10, switch support on both the card
     * (CCC_SWITCH, non-zero hs_max_dtr) and the host (capability bit). */
    if (card->scr.sda_vsn < SCR_SPEC_VER_1)
        return 0;

    if (!(card->csd.cmdclass & CCC_SWITCH))
        return 0;

    if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
        return 0;

    if (card->sw_caps.hs_max_dtr == 0)
        return 0;

    /* The CMD6 status block is always 512 bits (64 bytes). */
    status = kmalloc(64, GFP_KERNEL);
    if (!status) {
        printk(KERN_ERR "%s: could not allocate a buffer for "
            "switch capabilities.\n", mmc_hostname(card->host));
        return -ENOMEM;
    }

    /* Mode 1 (set), group 1, function 1: switch to high-speed. */
    err = mmc_sd_switch(card, 1, 0, 1, status);
    if (err)
        goto out;

    /* Low nibble of status byte 16 echoes the selected group-1
     * function; anything other than 1 means the switch failed. */
    if ((status[16] & 0xF) != 1) {
        printk(KERN_WARNING "%s: Problem switching card "
            "into high-speed mode!\n",
            mmc_hostname(card->host));
    } else {
        mmc_card_set_highspeed(card);
        mmc_set_timing(card->host, MMC_TIMING_SD_HS);
    }

out:
    kfree(status);

    return err;
}
/* Per-card sysfs attributes: raw register dumps plus decoded CID fields. */
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
    card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
    card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);

static struct attribute *sd_std_attrs[] = {
    &dev_attr_cid.attr,
    &dev_attr_csd.attr,
    &dev_attr_scr.attr,
    &dev_attr_date.attr,
    &dev_attr_fwrev.attr,
    &dev_attr_hwrev.attr,
    &dev_attr_manfid.attr,
    &dev_attr_name.attr,
    &dev_attr_oemid.attr,
    &dev_attr_serial.attr,
    NULL,
};

static struct attribute_group sd_std_attr_group = {
    .attrs = sd_std_attrs,
};

static const struct attribute_group *sd_attr_groups[] = {
    &sd_std_attr_group,
    NULL,
};

/* Device type given to every SD card (see mmc_alloc_card below) so the
 * attributes above are created automatically on registration. */
static struct device_type sd_type = {
    .groups = sd_attr_groups,
};
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 *
 * Must be called with the host claimed. Returns 0 on success, a
 * negative error code otherwise; a card allocated here is released
 * on error, while an oldcard is left for the caller to clean up.
 */
static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
    struct mmc_card *oldcard)
{
    struct mmc_card *card;
    int err;
    u32 cid[4];
    unsigned int max_dtr;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
    int retries;
#endif

    BUG_ON(!host);
    WARN_ON(!host->claimed);

    /*
     * Since we're changing the OCR value, we seem to
     * need to tell some cards to go back to the idle
     * state. We wait 1ms to give cards time to
     * respond.
     */
    mmc_go_idle(host);

    /*
     * If SD_SEND_IF_COND indicates an SD 2.0
     * compliant card and we should set bit 30
     * of the ocr to indicate that we can handle
     * block-addressed SDHC cards.
     */
    err = mmc_send_if_cond(host, ocr);
    if (!err)
        ocr |= 1 << 30;

    err = mmc_send_app_op_cond(host, ocr, NULL);
    if (err)
        goto err;

    /*
     * Fetch CID from card.
     */
    if (mmc_host_is_spi(host))
        err = mmc_send_cid(host, cid);
    else
        err = mmc_all_send_cid(host, cid);
    if (err)
        goto err;

    if (oldcard) {
        /* On resume a different CID means a different card was inserted. */
        if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
            err = -ENOENT;
            goto err;
        }

        card = oldcard;
    } else {
        /*
         * Allocate card structure.
         */
        card = mmc_alloc_card(host, &sd_type);
        if (IS_ERR(card)) {
            err = PTR_ERR(card);
            goto err;
        }

        card->type = MMC_TYPE_SD;
        memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
    }

    /*
     * For native busses: get card RCA and quit open drain mode.
     */
    if (!mmc_host_is_spi(host)) {
        err = mmc_send_relative_addr(host, &card->rca);
        if (err)
            goto free_card;

        mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
    }

    if (!oldcard) {
        /*
         * Fetch CSD from card.
         */
        err = mmc_send_csd(card, card->raw_csd);
        if (err)
            goto free_card;

        err = mmc_decode_csd(card);
        if (err)
            goto free_card;

        mmc_decode_cid(card);
    }

    /*
     * Select card, as all following commands rely on that.
     */
    if (!mmc_host_is_spi(host)) {
        err = mmc_select_card(card);
        if (err)
            goto free_card;
    }

    if (!oldcard) {
        /*
         * Fetch SCR from card.
         */
        err = mmc_app_send_scr(card, card->raw_scr);
        if (err)
            goto free_card;

        err = mmc_decode_scr(card);
        if (err < 0)
            goto free_card;

        /*
         * Fetch switch information from card.
         */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
        /* Some cards need several attempts before CMD6 succeeds. */
        for (retries = 1; retries <= 3; retries++) {
            err = mmc_read_switch(card);
            if (!err) {
                if (retries > 1) {
                    printk(KERN_WARNING
                           "%s: recovered\n",
                           mmc_hostname(host));
                }
                break;
            } else {
                printk(KERN_WARNING
                       "%s: read switch failed (attempt %d)\n",
                       mmc_hostname(host), retries);
            }
        }
#else
        err = mmc_read_switch(card);
#endif
        if (err)
            goto free_card;
    }

    /*
     * For SPI, enable CRC as appropriate.
     * This CRC enable is located AFTER the reading of the
     * card registers because some SDHC cards are not able
     * to provide valid CRCs for non-512-byte blocks.
     */
    if (mmc_host_is_spi(host)) {
        err = mmc_spi_set_crc(host, use_spi_crc);
        if (err)
            goto free_card;
    }

    /*
     * Attempt to change to high-speed (if supported)
     */
    err = mmc_switch_hs(card);
    if (err)
        goto free_card;

    /*
     * Compute bus speed: the lesser of what the card advertises
     * (high-speed or CSD rate) and "unlimited".
     */
    max_dtr = (unsigned int)-1;

    if (mmc_card_highspeed(card)) {
        if (max_dtr > card->sw_caps.hs_max_dtr)
            max_dtr = card->sw_caps.hs_max_dtr;
    } else if (max_dtr > card->csd.max_dtr) {
        max_dtr = card->csd.max_dtr;
    }

    mmc_set_clock(host, max_dtr);

    /*
     * Switch to wider bus (if supported).
     */
    if ((host->caps & MMC_CAP_4_BIT_DATA) &&
        (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
        err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
        if (err)
            goto free_card;

        mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
    }

    /*
     * Check if read-only switch is active.
     */
    if (!oldcard) {
        if (!host->ops->get_ro || host->ops->get_ro(host) < 0) {
            printk(KERN_WARNING "%s: host does not "
                "support reading read-only "
                "switch. assuming write-enable.\n",
                mmc_hostname(host));
        } else {
            if (host->ops->get_ro(host) > 0)
                mmc_card_set_readonly(card);
        }
    }

    /* Only publish a freshly allocated card; oldcard is already published. */
    if (!oldcard)
        host->card = card;

    return 0;

free_card:
    if (!oldcard)
        mmc_remove_card(card);
err:

    return err;
}
/*
 * Host is being removed. Free up the current card.
 *
 * Detaches the card pointer from the host and releases the card's
 * reference with the driver model.
 */
static void mmc_sd_remove(struct mmc_host *host)
{
    struct mmc_card *card;

    BUG_ON(!host);
    BUG_ON(!host->card);

    card = host->card;
    host->card = NULL;
    mmc_remove_card(card);
}
/*
 * Card detection callback from host.
 *
 * Polls the card with SEND_STATUS; if it no longer answers, the card
 * is assumed removed and is torn down together with the bus.
 */
static void mmc_sd_detect(struct mmc_host *host)
{
    int err = 0;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
    int retries = 5;
#endif

    BUG_ON(!host);
    BUG_ON(!host->card);

    mmc_claim_host(host);

    /*
     * Just check if our card has been removed.
     */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
    /* Retry a few times (with a short delay) before declaring it gone. */
    while(retries) {
        err = mmc_send_status(host->card, NULL);
        if (err) {
            retries--;
            udelay(5);
            continue;
        }
        break;
    }
    if (!retries) {
        printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
            __func__, mmc_hostname(host), err);
    }
#else
send_again:
    err = mmc_send_status(host->card, NULL);
#ifdef _MMC_SAFE_ACCESS_
    /* NOTE(review): this busy-loops while the card is flagged
     * unavailable yet still answering status polls; confirm the
     * intended semantics of mmc_is_available. */
    if((mmc_is_available == 0)&&(err == 0))
        goto send_again;
#endif
#endif
    mmc_release_host(host);

    if (err) {
        /* Card stopped responding: remove it and detach the bus. */
        mmc_sd_remove(host);

        mmc_claim_host(host);
        mmc_detach_bus(host);
        mmc_release_host(host);
    }
}
/*
 * Suspend callback from host.
 *
 * Drops the high-speed flag (so resume re-negotiates timing from
 * scratch) and, on native busses, deselects the card. Always
 * succeeds.
 */
static int mmc_sd_suspend(struct mmc_host *host)
{
    BUG_ON(!host);
    BUG_ON(!host->card);

    mmc_claim_host(host);

    host->card->state &= ~MMC_STATE_HIGHSPEED;
    if (!mmc_host_is_spi(host))
        mmc_deselect_cards(host);

    mmc_release_host(host);

    return 0;
}
/*
 * Resume callback from host.
 *
 * This function tries to determine if the same card is still present
 * and, if so, restore all state to it.
 *
 * Returns the result of the re-initialisation (0 on success).
 */
static int mmc_sd_resume(struct mmc_host *host)
{
    int err;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
    int retries;
#endif

    BUG_ON(!host);
    BUG_ON(!host->card);

    mmc_claim_host(host);
#ifdef CONFIG_MMC_PARANOID_SD_INIT
    /* Re-init can fail transiently right after wakeup; retry a few
     * times with a short delay before giving up. */
    retries = 5;
    while (retries) {
        err = mmc_sd_init_card(host, host->ocr, host->card);

        if (err) {
            printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
                   mmc_hostname(host), err, retries);
            mdelay(5);
            retries--;
            continue;
        }
        break;
    }
#else
    err = mmc_sd_init_card(host, host->ocr, host->card);
#endif
    mmc_release_host(host);

    return err;
}
/*
 * Power has been cycled: fully re-initialise the existing card.
 * The high-speed flag is cleared first so timing is re-negotiated.
 */
static int mmc_sd_power_restore(struct mmc_host *host)
{
    int err;

    host->card->state &= ~MMC_STATE_HIGHSPEED;

    mmc_claim_host(host);
    err = mmc_sd_init_card(host, host->ocr, host->card);
    mmc_release_host(host);

    return err;
}
/* Bus operations for removable slots: no suspend/resume handlers, the
 * core removes and re-probes the card across a power cycle instead. */
static const struct mmc_bus_ops mmc_sd_ops = {
    .remove = mmc_sd_remove,
    .detect = mmc_sd_detect,
    .suspend = NULL,
    .resume = NULL,
    .power_restore = mmc_sd_power_restore,
};

/* Bus operations for non-removable ("unsafe to remove") slots: the
 * same card is assumed present across suspend, so it is deselected on
 * suspend and re-initialised in place on resume. */
static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
    .remove = mmc_sd_remove,
    .detect = mmc_sd_detect,
    .suspend = mmc_sd_suspend,
    .resume = mmc_sd_resume,
    .power_restore = mmc_sd_power_restore,
};
/*
 * Pick the bus-ops table matching the slot type (removable cards get
 * no suspend/resume handlers) and attach it to the host.
 */
static void mmc_sd_attach_bus_ops(struct mmc_host *host)
{
    const struct mmc_bus_ops *bus_ops = mmc_card_is_removable(host) ?
        &mmc_sd_ops : &mmc_sd_ops_unsafe;

    mmc_attach_bus(host, bus_ops);
}
/*
 * Starting point for SD card init.
 *
 * Called with the host claimed and "ocr" holding the voltage window
 * the card reported. Attaches the SD bus ops, sanity-checks the
 * voltage window, initialises the card and registers it with the
 * driver model. On failure the bus is detached and the host released
 * before returning the error.
 */
int mmc_attach_sd(struct mmc_host *host, u32 ocr)
{
    int err;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
    int retries;
#endif

    BUG_ON(!host);
    WARN_ON(!host->claimed);

    mmc_sd_attach_bus_ops(host);

    /*
     * We need to get OCR a different way for SPI.
     */
    if (mmc_host_is_spi(host)) {
        mmc_go_idle(host);

        err = mmc_spi_read_ocr(host, 0, &ocr);
        if (err)
            goto err;
    }

    /*
     * Sanity check the voltages that the card claims to
     * support.
     */
    if (ocr & 0x7F) {
        printk(KERN_WARNING "%s: card claims to support voltages "
            "below the defined range. These will be ignored.\n",
            mmc_hostname(host));
        ocr &= ~0x7F;
    }

    if (ocr & MMC_VDD_165_195) {
        printk(KERN_WARNING "%s: SD card claims to support the "
            "incompletely defined 'low voltage range'. This "
            "will be ignored.\n", mmc_hostname(host));
        ocr &= ~MMC_VDD_165_195;
    }

    host->ocr = mmc_select_voltage(host, ocr);

    /*
     * Can we support the voltage(s) of the card(s)?
     */
    if (!host->ocr) {
        err = -EINVAL;
        goto err;
    }

    /*
     * Detect and init the card.
     */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
    /* Give flaky cards a few chances to initialise. */
    retries = 5;
    while (retries) {
        err = mmc_sd_init_card(host, host->ocr, NULL);
        if (err) {
            retries--;
            continue;
        }
        break;
    }

    if (!retries) {
        printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
               mmc_hostname(host), err);
        goto err;
    }
#else
    err = mmc_sd_init_card(host, host->ocr, NULL);
    if (err)
        goto err;
#endif

    mmc_release_host(host);

    err = mmc_add_card(host->card);
    if (err)
        goto remove_card;

    return 0;

remove_card:
    /* The host was released above; re-claim it so the shared error
     * path below can release it again. */
    mmc_remove_card(host->card);
    host->card = NULL;
    mmc_claim_host(host);
err:
    mmc_detach_bus(host);
    mmc_release_host(host);

    printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
        mmc_hostname(host), err);

    return err;
}
| gpl-2.0 |
ouya/xbmc | xbmc/utils/AliasShortcutUtils.cpp | 14 | 4028 | /*
* Copyright (C) 2009-2012 Team XBMC
* http://www.xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#if defined(TARGET_DARWIN_OSX)
#include <CoreServices/CoreServices.h>
#include "utils/URIUtils.h"
#elif defined(_LINUX)
#else
#endif
#include "AliasShortcutUtils.h"
// Return true when "path" names a platform alias/shortcut file
// (an OS X Finder alias). Always false on Linux; Windows .lnk
// detection exists below but is disabled pending testing.
bool IsAliasShortcut(CStdString &path)
{
  bool rtn = false;

#if defined(TARGET_DARWIN_OSX)
  // Note: regular files that have an .alias extension can be
  // reported as an alias when clearly, they are not. Trap them out.
  if (URIUtils::GetExtension(path) != ".alias")
  {
    FSRef fileRef;
    Boolean targetIsFolder, wasAliased;

    // It is better to call FSPathMakeRefWithOptions and pass kFSPathMakeRefDefaultOptions
    // since it will succeed for paths such as "/Volumes" unlike FSPathMakeRef.
    if (noErr == FSPathMakeRefWithOptions((UInt8*)path.c_str(), kFSPathMakeRefDefaultOptions, &fileRef, NULL))
    {
      if (noErr == FSIsAliasFile(&fileRef, &wasAliased, &targetIsFolder))
      {
        if (wasAliased)
        {
          rtn = true;
        }
      }
    }
  }
#elif defined(_LINUX)
  // Linux does not use alias or shortcut methods
#elif defined(WIN32)
  /* Needs testing under Windows platform so ignore shortcuts for now
  if (CUtil::GetExtension(path) == ".lnk")
  {
    rtn = true;
  }
  */
#endif
  return(rtn);
}
// Resolve an alias/shortcut in place: if "path" names an OS X Finder
// alias, replace it with the target's real path; otherwise leave it
// untouched. No-op on Linux; Windows .lnk resolution is present but
// disabled pending testing.
void TranslateAliasShortcut(CStdString &path)
{
#if defined(TARGET_DARWIN_OSX)
  FSRef fileRef;
  Boolean targetIsFolder, wasAliased;

  if (noErr == FSPathMakeRefWithOptions((UInt8*)path.c_str(), kFSPathMakeRefDefaultOptions, &fileRef, NULL))
  {
    // Follow the alias chain without any UI prompts.
    if (noErr == FSResolveAliasFileWithMountFlags(&fileRef, TRUE, &targetIsFolder, &wasAliased, kResolveAliasFileNoUI))
    {
      if (wasAliased)
      {
        char real_path[PATH_MAX];
        if (noErr == FSRefMakePath(&fileRef, (UInt8*)real_path, PATH_MAX))
        {
          path = real_path;
        }
      }
    }
  }
#elif defined(_LINUX)
  // Linux does not use alias or shortcut methods
#elif defined(WIN32)
  /* Needs testing under Windows platform so ignore shortcuts for now
  CComPtr<IShellLink> ipShellLink;

  // Get a pointer to the IShellLink interface
  if (NOERROR == CoCreateInstance(CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER, IID_IShellLink, (void**)&ipShellLink))
    WCHAR wszTemp[MAX_PATH];

    // Get a pointer to the IPersistFile interface
    CComQIPtr<IPersistFile> ipPersistFile(ipShellLink);

    // IPersistFile is using LPCOLESTR so make sure that the string is Unicode
#if !defined _UNICODE
    MultiByteToWideChar(CP_ACP, 0, lpszShortcutPath, -1, wszTemp, MAX_PATH);
#else
    wcsncpy(wszTemp, lpszShortcutPath, MAX_PATH);
#endif

    // Open the shortcut file and initialize it from its contents
    if (NOERROR == ipPersistFile->Load(wszTemp, STGM_READ))
    {
      // Try to find the target of a shortcut even if it has been moved or renamed
      if (NOERROR == ipShellLink->Resolve(NULL, SLR_UPDATE))
      {
        WIN32_FIND_DATA wfd;
        TCHAR real_path[PATH_MAX];
        // Get the path to the shortcut target
        if (NOERROR == ipShellLink->GetPath(real_path, MAX_PATH, &wfd, SLGP_RAWPATH))
        {
          // Get the description of the target
          TCHAR szDesc[MAX_PATH];
          if (NOERROR == ipShellLink->GetDescription(szDesc, MAX_PATH))
          {
            path = real_path;
          }
        }
      }
    }
  }
  */
#endif
}
| gpl-2.0 |
zhengsjembest/linux_am335x | linux-3.2.0-psp04.06.00.08.sdk/arch/ia64/hp/common/sba_iommu.c | 270 | 60276 | /*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h> /* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <asm/delay.h> /* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h> /* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/system.h> /* wmb() */
#include <asm/acpi-ext.h>
extern int swiotlb_late_init_with_default_size (size_t size);
#define PFX "IOC: "
/*
** Enabling timing search of the pdir resource map. Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING
/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS
/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries. Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU. Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG
/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues. If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR
#define ENABLE_MARK_CLEAN
/*
** The number of debug flags is a clue - this code is fragile. NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync. The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS
#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif
#define SBA_INLINE __inline__
/* #define SBA_INLINE */
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_INIT(x...)
#endif
#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...) printk(x)
#else
#define DBG_RUN(x...)
#endif
#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif
#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...) printk(x)
#else
#define DBG_BYPASS(x...)
#endif
#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
if(!(expr)) { \
printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
panic(#expr); \
}
#else
#define ASSERT(expr)
#endif
/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT 64
#define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
#define IOC_FUNC_ID 0x000
#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
#define IOC_IBASE 0x300 /* IO TLB */
#define IOC_IMASK 0x308
#define IOC_PCOM 0x310
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
#define IOC_ROPE0_CFG 0x500
#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above pages sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base */
unsigned long imask; /* pdir IOV Space mask */
unsigned long *res_hint; /* next avail IOVP - circular search */
unsigned long dma_mask;
spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
/* clearing pdir to prevent races with allocations. */
unsigned int res_bitshift; /* from the RIGHT! */
unsigned int res_size; /* size of resource map in bytes */
#ifdef CONFIG_NUMA
unsigned int node; /* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
/* than res_lock for bigger systems. */
int saved_cnt;
struct sba_dma_pair {
dma_addr_t iova;
size_t size;
} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE 0x100
unsigned long avg_search[SBA_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
#endif
/* Stuff we don't need in performance path */
struct ioc *next; /* list of IOC's in system */
acpi_handle handle; /* for multiple IOC's */
const char *name;
unsigned int func_id;
unsigned int rev; /* HW revision of chip */
u32 iov_size;
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
struct pci_dev *sac_only_dev;
};
static struct ioc *ioc_list;
static int reserve_sba_gart = 1;
static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
#define sba_sg_address(sg) sg_virt((sg))
#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif
#ifdef CONFIG_PCI
# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev) NULL
#endif
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
** (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr) __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
#ifdef DEBUG_SBA_INIT
/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 * Output goes through DBG_INIT, which is a no-op unless
 * DEBUG_SBA_INIT is defined.
 */
static void
sba_dump_tlb(char *hpa)
{
    DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
    DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
    DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
    DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
    DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
    DBG_INIT("\n");
}
#endif
#ifdef ASSERT_PDIR_SANITY
/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print ont the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 * The whole BITS_PER_LONG-entry group containing @pide is dumped,
 * with an arrow marking the entry itself.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
    /* start printing from lowest pde in rval */
    u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
    /* Matching word of the resource bitmap (one bit per pdir entry). */
    unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
    uint rcnt;

    printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
        msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

    rcnt = 0;
    while (rcnt < BITS_PER_LONG) {
        printk(KERN_DEBUG "%s %2d %p %016Lx\n",
            (rcnt == (pide & (BITS_PER_LONG - 1)))
            ? " -->" : " ",
            rcnt, ptr, (unsigned long long) *ptr );
        rcnt++;
        ptr++;
    }
    printk(KERN_DEBUG "%s", msg);
}
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print ont the output line.
 *
 * Verify the resource map and pdir state is consistent
 *
 * Walks both structures bit-by-bit: each bit of the resource map
 * must agree with the valid bit (bit 63) of the corresponding pdir
 * entry. Returns 0 when consistent, 1 after dumping the first
 * mismatching entry group.
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
    u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
    u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
    u64 *pptr = ioc->pdir_base; /* pdir ptr */
    uint pide = 0;

    while (rptr < rptr_end) {
        u64 rval;
        int rcnt; /* number of bits we might check */

        rval = *rptr;
        rcnt = 64;

        while (rcnt) {
            /* Get last byte and highest bit from that */
            u32 pde = ((u32)((*pptr >> (63)) & 0x1));
            if ((rval & 0x1) ^ pde)
            {
                /*
                ** BUMMER! -- res_map != pdir --
                ** Dump rval and matching pdir entries
                */
                sba_dump_pdir_entry(ioc, msg, pide);
                return(1);
            }
            rcnt--;
            rval >>= 1; /* try the next bit */
            pptr++;
            pide++;
        }
        rptr++; /* look at next word of res_map */
    }
    /* It'd be nice if we always got here :^) */
    return 0;
}
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 * Note @ioc is unused here; callers may pass NULL.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
    while (nents-- > 0) {
        printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
            startsg->dma_address, startsg->dma_length,
            sba_sg_address(startsg));
        startsg = sg_next(startsg);
    }
}
/* Debugging only: scan an SG list for NULL segment addresses; on the
 * first hit, dump the entire original list for inspection. Iterates
 * over a private cursor so @startsg/@nents stay intact for the dump. */
static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
    struct scatterlist *the_sg = startsg;
    int the_nents = nents;

    while (the_nents-- > 0) {
        if (sba_sg_address(the_sg) == 0x0UL)
            sba_dump_sg(NULL, startsg, nents);
        the_sg = sg_next(the_sg);
    }
}
#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
* I/O Pdir Resource Management
*
* Bits set in the resource map are in use.
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
#define PDIR_ENTRY_SIZE sizeof(u64)
#define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
#define RESMAP_MASK(n) ~(~0UL << (n))
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 *
 * Computes ceil(log2(size)) in units of IOTLB pages (iovp_shift) by
 * reading the floating-point exponent of (size - 1) with the ia64
 * getf.exp instruction; 0xffff is the exponent bias being removed.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
    long double d = size - 1;
    long order;

    order = ia64_getf_exp(d);
    order = order - iovp_shift - 0xffff + 1;
    if (order < 0)
        order = 0;
    return order;
}
/* Convert a resource-map word pointer plus a bit offset within that
 * word into the corresponding pdir index (one map bit per entry). */
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
                                 unsigned int bitshiftcnt)
{
    unsigned long byte_off;

    byte_off = (unsigned long)res_ptr - (unsigned long)ioc->res_map;
    return (byte_off << 3) + bitshiftcnt;
}
/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device whose DMA segment boundary constrains the placement.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 *
 * Takes and releases ioc->res_lock internally. Returns the pdir
 * index of the allocated range, or ~0UL if nothing suitable was
 * found (res_hint is then reset to the start of the map so the
 * caller can retry with use_hint == 0).
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
    unsigned long bits_wanted, int use_hint)
{
    unsigned long *res_ptr;
    unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
    unsigned long flags, pide = ~0UL, tpide;
    unsigned long boundary_size;
    unsigned long shift;
    int ret;

    ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
    /* NOTE(review): res_ptr is still uninitialized here, so this
     * ASSERT reads an indeterminate value when ASSERT_PDIR_SANITY
     * is enabled (it is #undef'd by default). */
    ASSERT(res_ptr < res_end);

    /* Allocations must not cross the device's DMA segment boundary;
     * convert it to units of IOTLB pages. */
    boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
    boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

    BUG_ON(ioc->ibase & ~iovp_mask);
    shift = ioc->ibase >> iovp_shift;

    spin_lock_irqsave(&ioc->res_lock, flags);

    /* Allow caller to force a search through the entire resource space */
    if (likely(use_hint)) {
        res_ptr = ioc->res_hint;
    } else {
        res_ptr = (ulong *)ioc->res_map;
        ioc->res_bitshift = 0;
    }

    /*
     * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
     * if a TLB entry is purged while in use. sba_mark_invalid()
     * purges IOTLB entries in power-of-two sizes, so we also
     * allocate IOVA space in power-of-two sizes.
     */
    bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

    if (likely(bits_wanted == 1)) {
        /* Single page: scan word-at-a-time for any zero bit. */
        unsigned int bitshiftcnt;
        for(; res_ptr < res_end ; res_ptr++) {
            if (likely(*res_ptr != ~0UL)) {
                bitshiftcnt = ffz(*res_ptr);
                *res_ptr |= (1UL << bitshiftcnt);
                pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                ioc->res_bitshift = bitshiftcnt + bits_wanted;
                goto found_it;
            }
        }
        goto not_found;
    }

    if (likely(bits_wanted <= BITS_PER_LONG/2)) {
        /*
        ** Search the resource bit map on well-aligned values.
        ** "o" is the alignment.
        ** We need the alignment to invalidate I/O TLB using
        ** SBA HW features in the unmap path.
        */
        unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
        uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
        unsigned long mask, base_mask;

        base_mask = RESMAP_MASK(bits_wanted);
        mask = base_mask << bitshiftcnt;

        DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
        for(; res_ptr < res_end ; res_ptr++)
        {
            DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
            ASSERT(0 != mask);
            /* Slide the aligned mask across the current word,
             * rejecting candidates that span a segment boundary. */
            for (; mask ; mask <<= o, bitshiftcnt += o) {
                tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                ret = iommu_is_span_boundary(tpide, bits_wanted,
                                             shift,
                                             boundary_size);
                if ((0 == ((*res_ptr) & mask)) && !ret) {
                    *res_ptr |= mask; /* mark resources busy! */
                    pide = tpide;
                    ioc->res_bitshift = bitshiftcnt + bits_wanted;
                    goto found_it;
                }
            }

            bitshiftcnt = 0;
            mask = base_mask;
        }

    } else {
        /* Large request: needs qwords full map words plus "bits"
         * leftover, and the run must start on a word boundary. */
        int qwords, bits, i;
        unsigned long *end;

        qwords = bits_wanted >> 6; /* /64 */
        bits = bits_wanted - (qwords * BITS_PER_LONG);

        end = res_end - qwords;

        for (; res_ptr < end; res_ptr++) {
            tpide = ptr_to_pide(ioc, res_ptr, 0);
            ret = iommu_is_span_boundary(tpide, bits_wanted,
                                         shift, boundary_size);
            if (ret)
                goto next_ptr;
            for (i = 0 ; i < qwords ; i++) {
                if (res_ptr[i] != 0)
                    goto next_ptr;
            }
            if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
                continue;

            /* Found it, mark it */
            for (i = 0 ; i < qwords ; i++)
                res_ptr[i] = ~0UL;
            res_ptr[i] |= RESMAP_MASK(bits);

            pide = tpide;
            res_ptr += qwords;
            ioc->res_bitshift = bits;
            goto found_it;
next_ptr:
            ;
        }
    }

not_found:
    prefetch(ioc->res_map);
    ioc->res_hint = (unsigned long *) ioc->res_map;
    ioc->res_bitshift = 0;
    spin_unlock_irqrestore(&ioc->res_lock, flags);
    return (pide);

found_it:
    /* Remember where we stopped so the next search resumes here. */
    ioc->res_hint = res_ptr;
    spin_unlock_irqrestore(&ioc->res_lock, flags);
    return (pide);
}
/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is for (supplies the DMA segment boundary).
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked and then mark those bits in the
 * resource bit map.  Returns the pdir index of the allocated range, or -1
 * when no suitable run of bits could be found.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	/* Caller must pass a non-zero size already rounded to iovp_size. */
	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		/* Hinted search failed; retry from the start of the bitmap. */
		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot.  We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3))) {
				/* Fixed: the two concatenated string pieces
				 * used to run together as "isout of". */
				printk(KERN_WARNING "%s: I/O MMU @ %p is "
				       "out of mapping resources, %u %u %lx\n",
				       __func__, ioc->ioc_hpa, ioc->res_size,
				       pages_needed, dma_get_seg_boundary(dev));
				return -1;
			}
#else
			/* Fixed: missing space made the message "isout of". */
			printk(KERN_WARNING "%s: I/O MMU @ %p is "
			       "out of mapping resources, %u %u %lx\n",
			       __func__, ioc->ioc_hpa, ioc->res_size,
			       pages_needed, dma_get_seg_boundary(dev));
			return -1;
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

	return (pide);
}
/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes the mapping covered.
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {
		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;
		} else {
			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
				bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;

			/*
			 * Fix: zero the counter only AFTER the debug print and
			 * asserts above.  Previously it was cleared first, so
			 * DBG_RES always printed 0 and ASSERT(bits_not_wanted)
			 * tripped unconditionally when assertions were enabled.
			 */
			bits_not_wanted = 0;	/* final word done; exit loop */
		}
	}
}
/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/
/**
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @vba: Virtual CPU address of buffer to map
*
* SBA Mapping Routine
*
* Given a virtual address (vba, arg1) sba_io_pdir_entry()
* loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
* Each IO Pdir entry consists of 8 bytes as shown below
* (LSB == bit 0):
*
* 63 40 11 7 0
* +-+---------------------+----------------------------------+----+--------+
* |V| U | PPN[39:12] | U | FF |
* +-+---------------------+----------------------------------+----+--------+
*
* V == Valid Bit
* U == Unused
* PPN == Physical Page Number
*
* The physical address fields are filled with the results of virt_to_phys()
* on the vba.
*/
#if 1
/*
 * Fast path: open-coded as a macro so the hot mapping loops avoid a call.
 * Masks off the top three bits and the low 12 bits of the virtual address,
 * then sets the Valid bit (bit 63).
 *
 * NOTE(review): the disabled C version below also sets the low FF byte,
 * while this macro sets only the Valid bit -- confirm which is intended.
 */
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
						      | 0x8000000000000000ULL)
#else
/* Out-of-line reference implementation, kept for debugging. */
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
#ifdef ENABLE_MARK_CLEAN
/*
 * mark_clean - flag completely DMA-written pages as i-cache clean.
 *
 * DMA on ia64 is i-cache coherent, so any whole page that a device wrote
 * can be marked PG_arch_1 ("clean"); lazy_mmu_prot_update() then skips
 * flushing it when the page is mapped into an executable vm-area.
 * Partial pages at either end of [addr, addr+size) are left untouched.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long cur = PAGE_ALIGN((unsigned long) addr);
	unsigned long limit = (unsigned long) addr + size;

	for (; cur + PAGE_SIZE <= limit; cur += PAGE_SIZE)
		set_bit(PG_arch_1, &virt_to_page((void *)cur)->flags);
}
#endif
/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minium
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);
	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		/* Single-page case: purge exactly one IO page. */
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;	/* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		/* Multi-page case: encode log2(range) in the low bits for PCOM
		** and walk every pdir entry the range covers. */
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	/* Kick off the IO TLB purge: base address plus encoded size. */
	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}
/**
 * sba_map_page - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page containing the buffer to map.
 * @poff: byte offset of the buffer within @page.
 * @size: number of bytes to map in driver buffer.
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * Returns the IOVA (or, on the bypass path, the physical address) to hand
 * to the device, or 0 when pdir allocation fails.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device is bit capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
			   "0x%lx/0x%lx\n",
			   to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif

	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, dev, size);
	if (pide < 0)
		return 0;	/* allocation failed: 0 is the error cookie */

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	/* Fill one pdir entry per IO page covered by the buffer. */
	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}
/* Map a kernel-virtual buffer: delegate to sba_map_page() on its page. */
static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
				       size_t size, enum dma_data_direction dir,
				       struct dma_attrs *attrs)
{
	unsigned long poff = (unsigned long) addr & ~PAGE_MASK;

	return sba_map_page(dev, virt_to_page(addr), poff, size, dir, attrs);
}
#ifdef ENABLE_MARK_CLEAN
/*
 * sba_mark_clean - mark the CPU pages backing [iova, iova+size) as clean.
 *
 * Walks each pdir entry the range covers, recovers the CPU virtual address
 * it points at, and calls mark_clean() for that IO page.  A range that fits
 * in a single IO page is simply one trip through the loop, since min()
 * clamps the final (possibly partial) chunk.
 */
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	int off = PDIR_INDEX(iovp);
	void *addr;

	do {
		addr = phys_to_virt(ioc->pdir_base[off] &
				    ~0xE000000000000FFFULL);
		mark_clean(addr, min(size, iovp_size));
		off++;
		size -= iovp_size;
	} while (size > 0);
}
#endif
/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova: IOVA of driver buffer previously mapped.
 * @size: number of bytes mapped in driver buffer.
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (likely((iova & ioc->imask) != ioc->ibase)) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
			   iova);

#ifdef ENABLE_MARK_CLEAN
		if (dir == DMA_FROM_DEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	/* Strip the intra-page offset and round the range to IO pages. */
	offset = iova & ~iovp_mask;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
	if (dir == DMA_FROM_DEVICE)
		sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
	/* Defer the purge/free: queue the range, batch-flush when full. */
	spin_lock_irqsave(&ioc->saved_lock, flags);
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
		int cnt = ioc->saved_cnt;
		spin_lock(&ioc->res_lock);
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
		spin_unlock(&ioc->res_lock);
	}
	spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
/* Thin wrapper: unmap one IOVA previously set up via the map-page path. */
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	sba_unmap_page(dev, iova, size, dir, attrs);
}
/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @dma_handle: IOVA of new buffer.
 * @flags: gfp allocation flags.
 *
 * Returns the zeroed kernel-virtual address of the buffer, or NULL on
 * allocation failure; *@dma_handle receives the device-visible address.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
	struct ioc *ioc;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_NUMA
	{
		/* Prefer pages on the IOC's own NUMA node when it is known. */
		struct page *page;
		page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
		                        numa_node_id() : ioc->node, flags,
		                        get_order(size));

		if (unlikely(!page))
			return NULL;

		addr = page_address(page);
	}
#else
	addr = (void *) __get_free_pages(flags, get_order(size));
#endif
	if (unlikely(!addr))
		return NULL;

	memset(addr, 0, size);
	*dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
		           dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
					   size, 0, NULL);

	return addr;
}
/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @vaddr: virtual address IOVA of "consistent" buffer.
 * @dma_handle: IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle)
{
	/* Tear down the IOVA mapping first, then release the pages. */
	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
	free_pages((unsigned long) vaddr, get_order(size));
}
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL

#ifdef DEBUG_LARGE_SG_ENTRIES
/* Set by sba_coalesce_chunks() when a multi-page run is seen, to make
** sba_fill_pdir() dump each entry of the run. */
int dump_run_sg = 0;
#endif
/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.  Returns the number of DMA streams produced.
 */
static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	while (nents-- > 0) {
		int cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
			nents, startsg->dma_address, cnt,
			sba_sg_address(startsg));
#endif

		/*
		** Look for the start of a new DMA stream
		** (entries flagged by sba_coalesce_chunks() carry the pdir
		** index in dma_address with PIDE_FLAG set).
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			if (n_mappings)
				dma_sg = sg_next(dma_sg);
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset=0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			/* One pdir entry per IO page in this chunk. */
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg = sg_next(startsg);
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}
/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits; the test is true
**  only when the low iovp_shift bits of BOTH addresses are zero)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device issuing the DMA (supplies the max segment size).
 * @startsg: list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks, or -1 if pdir space runs out.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int idx;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg = sg_next(startsg);

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			/* Respect the device's maximum DMA segment size. */
			if (dma_len + startsg->length > max_seg_size)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo Successive chunks
			** must start on page boundaries and dove tail
			** with it's predecessor.
			*/
			vcontig_sg->dma_length = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		idx = sba_alloc_range(ioc, dev, dma_len);
		if (idx < 0) {
			/* Out of pdir space: signal failure to the caller. */
			dma_sg->dma_length = 0;
			return -1;
		}
		/* Stash pdir index + offset for sba_fill_pdir(), tagged with
		** PIDE_FLAG so a stream head is distinguishable from 0. */
		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
						   | dma_offset);
		n_mappings++;
	}

	return n_mappings;
}
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
/**
 * sba_map_sg_attrs - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * Returns the number of DMA streams mapped, or 0 on failure.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	/* Device can address all of memory: skip the IOMMU entirely. */
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg_attrs()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
	if (coalesced < 0) {
		/* Allocation failed mid-list: undo whatever was mapped. */
		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
		return 0;
	}

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg_attrs()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}
/**
 * sba_unmap_sg_attrs - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * Walks the mapped entries (those with a non-zero dma_length) and tears
 * each one down through sba_unmap_single_attrs().
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	for (; nents && sglist->dma_length; nents--) {
		sba_unmap_single_attrs(dev, sglist->dma_address,
				       sglist->dma_length, dir, attrs);
		sglist = sg_next(sglist);
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
/**************************************************************
*
* Initialization and claim
*
***************************************************************/
/*
 * ioc_iova_init - discover the IOVA window and set up the I/O page table.
 *
 * Reads the firmware-programmed IBASE/IMASK registers, programs the IOTLB
 * page size, allocates and installs the pdir, optionally reserves half the
 * window for AGP GART, purges the IO TLB, and enables translation.
 */
static void __init
ioc_iova_init(struct ioc *ioc)
{
	int tcnfg;
	int agp_found = 0;
	struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
	unsigned long index;
#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

	ioc->iov_size = ~ioc->imask + 1;

	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		ioc->iov_size >> 20);

	/* Translate the configured IOTLB page size into its TCNFG encoding. */
	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
				iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/* One PDIR_ENTRY_SIZE-byte entry per IO page in the window. */
	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
		      ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated, we don't need more than
	** one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		/* Fill the spill page with a recognizable poison pattern. */
		poison_addr = addr;
		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif

	/* Clear I/O TLB of any possible entries */
	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);

	/* Enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
	READ_REG(ioc->ioc_hpa + IOC_IBASE);
}
/*
 * ioc_resource_init - allocate and initialize the IOVA allocation bitmap.
 *
 * The resource map has one bit per pdir entry; its size therefore follows
 * directly from pdir_size computed in ioc_iova_init().
 */
static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;  /* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
							| prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __func__,
		 ioc->res_size, (void *) ioc->res_map);
}
/*
 * ioc_sac_init - build a fake 32-bit-only PCI device for coherent allocs.
 *
 * pci_alloc_coherent() must return a DMA address which is SAC (single
 * address cycle) addressable, so fabricate a pseudo pci_dev whose
 * dma_mask forces the mapping path, and hang it off the ioc.
 */
static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *pdev;
	struct pci_controller *ctl;

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		panic(PFX "Couldn't allocate struct pci_dev");

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		panic(PFX "Couldn't allocate struct pci_controller");

	ctl->iommu = ioc;
	pdev->sysdata = ctl;
	pdev->dma_mask = 0xFFFFFFFFUL;	/* SAC only: 32-bit addressing */
#ifdef CONFIG_PCI
	pdev->dev.bus = &pci_bus_type;
#endif
	ioc->sac_only_dev = pdev;
}
/*
 * ioc_zx1_init - zx1-specific IOC setup.
 *
 * Requires IOC rev 2.0+, sets the zx1 DMA mask, and clears the AO bit in
 * each ROPE(N)_CONFIG register.
 */
static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int rope;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit on each of the eight rope registers
	** (stride 8 bytes apart).
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
	for (rope = 0; rope < 8; rope++) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + rope*8);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + rope*8);
	}
}
/* Per-variant init hook, keyed by the IOC's hardware function ID. */
typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;	/* value matched against IOC_FUNC_ID register */
	char *name;	/* human-readable chip name */
	initfunc *init;	/* optional variant-specific setup; may be NULL */
};

/* Table of known IOC variants, scanned by ioc_init(). */
static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};
/*
 * ioc_init - discover, identify and bring up one IOC.
 * @hpa: physical base of the IOC register block.
 * @handle: opaque firmware handle stored for later lookup.
 *
 * Allocates the ioc, links it onto the global ioc_list, identifies the
 * chip via its function ID, runs any variant hook, then initializes the
 * IOVA window, resource map and SAC pseudo-device.  Returns the new ioc,
 * or NULL if the initial allocation fails.
 */
static struct ioc * __init
ioc_init(unsigned long hpa, void *handle)
{
	struct ioc *ioc;
	struct ioc_iommu *info;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return NULL;

	/* Link onto the global list before probing the hardware. */
	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->handle = handle;
	/* NOTE(review): ioremap() result is not checked -- confirm it cannot
	 * fail this early in boot, or add a check. */
	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	/* Match against known variants and run any per-chip init hook. */
	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
		PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		/* Unknown chip: synthesize a name from the function ID. */
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
		ia64_max_iommu_merge_mask = ~iovp_mask;

	printk(KERN_INFO PFX
		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);

	return ioc;
}
/**************************************************************************
**
** SBA initialization code (HW and SW)
**
** o identify SBA chip itself
** o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/
#ifdef CONFIG_PROC_FS
/* seq_file iterator: return the *pos'th IOC on the global list */
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

/* seq_file iterator: advance to the next IOC */
static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;

	return ioc->next;
}

/* seq_file iterator: nothing to release */
static void
ioc_stop(struct seq_file *s, void *v)
{
}
/* Emit one IOC's statistics (IOVA sizes, PDIR usage, optional search
 * timing) into the seq_file.
 */
static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != MAX_NUMNODES)
		seq_printf(s, "NUMA node : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);

	/* Count allocated IOVA pages by popcounting the resource bitmap */
	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		/* min/avg/max CPU cycles per bitmap search over the sample window */
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}
/* seq_file plumbing for the /proc/bus/mckinley entry */
static const struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next = ioc_next,
	.stop = ioc_stop,
	.show = ioc_show
};

static int
ioc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ioc_seq_ops);
}

static const struct file_operations ioc_fops = {
	.open = ioc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};
/* Create /proc/bus/mckinley/<first IOC name>.  Only reached when
 * ioc_list is non-empty (sba_init() bails out earlier otherwise).
 */
static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	proc_create(ioc_list->name, 0, dir, &ioc_fops);
}
#endif
/*
 * sba_connect_bus - associate a PCI bus with the IOC that serves it
 * @bus: PCI bus being connected
 *
 * Walks up the ACPI namespace from the bus's handle until one matches
 * a previously claimed IOC, then stores that IOC in the bus's
 * pci_controller.  Warns if no enclosing IOC scope is found.
 */
static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	/* Already wired up to an IOC - nothing to do */
	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = PCI_CONTROLLER(bus)->acpi_handle;
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}
#ifdef CONFIG_NUMA
/* Record which NUMA node an IOC lives on, derived from its ACPI
 * proximity domain.  Leaves ioc->node as MAX_NUMNODES when no valid
 * online node can be determined.
 */
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
	unsigned int node;
	int pxm;

	ioc->node = MAX_NUMNODES;

	pxm = acpi_get_pxm(handle);

	if (pxm < 0)
		return;

	node = pxm_to_node(pxm);

	if (node >= MAX_NUMNODES || !node_online(node))
		return;

	ioc->node = node;
	return;
}
#else
#define sba_map_ioc_to_node(ioc, handle)
#endif
/*
 * acpi_sba_ioc_add - ACPI .add callback: claim and initialize an IOC
 * @device: matched ACPI device (HWP0001 SBA or HWP0004 IOC)
 *
 * Returns 0 when the IOC was claimed, 1 on any failure.
 */
static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info *adi;

	status = hp_acpi_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	status = acpi_get_object_info(device->handle, &adi);
	if (ACPI_FAILURE(status))
		return 1;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(adi);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc = ioc_init(hpa, device->handle);
	if (!ioc)
		return 1;

	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, device->handle);
	return 0;
}
/* ACPI IDs claimed by this driver: HWP0001 (SBA) and HWP0004 (IOC) */
static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},
};
static struct acpi_driver acpi_sba_ioc_driver = {
	.name = "IOC IOMMU Driver",
	.ids = hp_ioc_iommu_device_ids,
	.ops = {
		.add = acpi_sba_ioc_add,
	},
};

/* swiotlb fallback used when no IOC is found or on kdump boots */
extern struct dma_map_ops swiotlb_dma_ops;
/*
 * sba_init - subsystem init: discover and bring up all SBA IOMMUs
 *
 * On CONFIG_IA64_GENERIC kernels this falls back to swiotlb (and the
 * "dig" machvec) for kdump boots or when no IOC can be claimed.
 */
static int __init
sba_init(void)
{
	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
		return 0;

#if defined(CONFIG_IA64_GENERIC)
	/* If we are booting a kdump kernel, the sba_iommu will
	 * cause devices that were not shutdown properly to MCA
	 * as soon as they are turned back on.  Our only option for
	 * a successful kdump kernel boot is to use the swiotlb.
	 */
	if (is_kdump_kernel()) {
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to initialize software I/O TLB:"
			      " Try machvec=dig boot option");
		machvec_init("dig");
		return 0;
	}
#endif

	/* Registration probes the namespace; acpi_sba_ioc_add() fills ioc_list */
	acpi_bus_register_driver(&acpi_sba_ioc_driver);
	if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
		/*
		 * If we didn't find something sba_iommu can claim, we
		 * need to setup the swiotlb and switch to the dig machvec.
		 */
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to find SBA IOMMU or initialize "
			      "software I/O TLB: Try machvec=dig boot option");
		machvec_init("dig");
#else
		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
		return 0;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
	/*
	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
	 * buffer setup to support devices with smaller DMA masks than
	 * sba_iommu can handle.
	 */
	if (ia64_platform_is("hpzx1_swiotlb")) {
		extern void hwsw_init(void);

		hwsw_init();
	}
#endif

#ifdef CONFIG_PCI
	{
		/* Attach every already-enumerated PCI bus to its IOC */
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
/* "nosbagart" boot argument: don't reserve the SBA GART */
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

/* DMA API: any device that is at least 32-bit DMA capable is supported */
static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

/* DMA API: mappings never produce an error cookie, so never report one */
static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

__setup("nosbagart", nosbagart);
/*
 * sba_page_override - "sbapagesize=" boot argument handler
 * @str: argument string, parsed with memparse() (accepts k/M suffixes)
 *
 * Sets iovp_shift for one of the supported IOMMU page sizes
 * (4k/8k/16k/64k); anything else is rejected and the compiled-in
 * default is kept.  Always returns 1 (argument consumed).
 *
 * Fix: the rejection message was issued without a printk log level;
 * it now logs at KERN_ERR.
 */
static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);

	switch (page_size) {
	case 4096:
	case 8192:
	case 16384:
	case 65536:
		/* ffs() gives the (1-based) bit position of the size */
		iovp_shift = ffs(page_size) - 1;
		break;
	default:
		printk(KERN_ERR "%s: unknown/unsupported iommu page size %ld\n",
		       __func__, page_size);
	}

	return 1;
}

__setup("sbapagesize=",sba_page_override);
/* DMA operations vector installed by sba_dma_init() */
struct dma_map_ops sba_dma_ops = {
	.alloc_coherent = sba_alloc_coherent,
	.free_coherent = sba_free_coherent,
	.map_page = sba_map_page,
	.unmap_page = sba_unmap_page,
	.map_sg = sba_map_sg_attrs,
	.unmap_sg = sba_unmap_sg_attrs,
	.sync_single_for_cpu = machvec_dma_sync_single,
	.sync_sg_for_cpu = machvec_dma_sync_sg,
	.sync_single_for_device = machvec_dma_sync_single,
	.sync_sg_for_device = machvec_dma_sync_sg,
	.dma_supported = sba_dma_supported,
	.mapping_error = sba_dma_mapping_error,
};

/* Make the SBA IOMMU the platform's DMA implementation */
void sba_dma_init(void)
{
	dma_ops = &sba_dma_ops;
}
| gpl-2.0 |
cphelps76/DEMENTED_kernel_grouper | drivers/mfd/wm8994-core.c | 270 | 13833 | /*
* wm8994-core.c -- Device access for Wolfson WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/registers.h>
/* Raw multi-byte read from the register map into @dest (device byte order). */
static int wm8994_read(struct wm8994 *wm8994, unsigned short reg,
		       int bytes, void *dest)
{
	struct regmap *map = wm8994->regmap;

	return regmap_raw_read(map, reg, dest, bytes);
}
/**
 * wm8994_reg_read: Read a single WM8994 register.
 *
 * @wm8994: Device to read from.
 * @reg: Register to read.
 *
 * Returns the register value on success or a negative errno.
 */
int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
{
	unsigned int val;
	int ret = regmap_read(wm8994->regmap, reg, &val);

	return (ret < 0) ? ret : (int)val;
}
EXPORT_SYMBOL_GPL(wm8994_reg_read);
/**
 * wm8994_bulk_read: Read multiple WM8994 registers
 *
 * @wm8994: Device to read from
 * @reg: First register
 * @count: Number of registers
 * @buf: Buffer to fill. The data will be returned big endian.
 */
int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
		     int count, u16 *buf)
{
	struct regmap *map = wm8994->regmap;

	return regmap_bulk_read(map, reg, buf, count);
}
/* Raw multi-byte write of @bytes from @src to the register map. */
static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
			int bytes, const void *src)
{
	struct regmap *map = wm8994->regmap;

	return regmap_raw_write(map, reg, src, bytes);
}
/**
 * wm8994_reg_write: Write a single WM8994 register.
 *
 * @wm8994: Device to write to.
 * @reg: Register to write to.
 * @val: Value to write.
 *
 * Returns 0 on success or a negative errno.
 */
int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
		     unsigned short val)
{
	int ret = regmap_write(wm8994->regmap, reg, val);

	return ret;
}
EXPORT_SYMBOL_GPL(wm8994_reg_write);
/**
* wm8994_bulk_write: Write multiple WM8994 registers
*
* @wm8994: Device to write to
* @reg: First register
* @count: Number of registers
* @buf: Buffer to write from. Data must be big-endian formatted.
*/
int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
int count, const u16 *buf)
{
return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
}
EXPORT_SYMBOL_GPL(wm8994_bulk_write);
/**
 * wm8994_set_bits: Set the value of a bitfield in a WM8994 register
 *
 * @wm8994: Device to write to.
 * @reg: Register to write to.
 * @mask: Mask of bits to set.
 * @val: Value to set (unshifted)
 */
int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
		    unsigned short mask, unsigned short val)
{
	int ret = regmap_update_bits(wm8994->regmap, reg, mask, val);

	return ret;
}
EXPORT_SYMBOL_GPL(wm8994_set_bits);
/* The two on-chip LDO regulators; registered first so the rest of the
 * device can be powered from them during bootstrap.
 */
static struct mfd_cell wm8994_regulator_devs[] = {
	{
		.name = "wm8994-ldo",
		.id = 1,
		.pm_runtime_no_callbacks = true,
	},
	{
		.name = "wm8994-ldo",
		.id = 2,
		.pm_runtime_no_callbacks = true,
	},
};

/* IRQ range handed to the CODEC child device */
static struct resource wm8994_codec_resources[] = {
	{
		.start = WM8994_IRQ_TEMP_SHUT,
		.end = WM8994_IRQ_TEMP_WARN,
		.flags = IORESOURCE_IRQ,
	},
};

/* IRQ range handed to the GPIO child device */
static struct resource wm8994_gpio_resources[] = {
	{
		.start = WM8994_IRQ_GPIO(1),
		.end = WM8994_IRQ_GPIO(11),
		.flags = IORESOURCE_IRQ,
	},
};

/* Main function children: the CODEC and GPIO blocks */
static struct mfd_cell wm8994_devs[] = {
	{
		.name = "wm8994-codec",
		.num_resources = ARRAY_SIZE(wm8994_codec_resources),
		.resources = wm8994_codec_resources,
	},
	{
		.name = "wm8994-gpio",
		.num_resources = ARRAY_SIZE(wm8994_gpio_resources),
		.resources = wm8994_gpio_resources,
		.pm_runtime_no_callbacks = true,
	},
};
/*
 * Supplies for the main bulk of CODEC; the LDO supplies are ignored
 * and should be handled via the standard regulator API supply
 * management.
 */
static const char *wm8994_main_supplies[] = {
	"DBVDD",
	"DCVDD",
	"AVDD1",
	"AVDD2",
	"CPVDD",
	"SPKVDD1",
	"SPKVDD2",
};

/* The WM8958 splits DBVDD into three separate rails */
static const char *wm8958_main_supplies[] = {
	"DBVDD1",
	"DBVDD2",
	"DBVDD3",
	"DCVDD",
	"AVDD1",
	"AVDD2",
	"CPVDD",
	"SPKVDD1",
	"SPKVDD2",
};
#ifdef CONFIG_PM
/*
 * wm8994_suspend - system suspend handler
 * @dev: the wm8994 core device
 *
 * Skips the suspend while VMID is up (CODEC still in use), otherwise
 * saves GPIO/LDO register state, soft-resets the chip and disables
 * the supply regulators.
 */
static int wm8994_suspend(struct device *dev)
{
	struct wm8994 *wm8994 = dev_get_drvdata(dev);
	int ret;

	/* Don't actually go through with the suspend if the CODEC is
	 * still active (eg, for audio passthrough from CP). */
	ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
	if (ret < 0) {
		dev_err(dev, "Failed to read power status: %d\n", ret);
	} else if (ret & WM8994_VMID_SEL_MASK) {
		dev_dbg(dev, "CODEC still active, ignoring suspend\n");
		return 0;
	}

	/* GPIO configuration state is saved here since we may be configuring
	 * the GPIO alternate functions even if we're not using the gpiolib
	 * driver for them.
	 */
	ret = wm8994_read(wm8994, WM8994_GPIO_1, WM8994_NUM_GPIO_REGS * 2,
			  &wm8994->gpio_regs);
	if (ret < 0)
		dev_err(dev, "Failed to save GPIO registers: %d\n", ret);

	/* For similar reasons we also stash the regulator states */
	ret = wm8994_read(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
			  &wm8994->ldo_regs);
	if (ret < 0)
		dev_err(dev, "Failed to save LDO registers: %d\n", ret);

	/* Explicitly put the device into reset in case regulators
	 * don't get disabled in order to ensure consistent restart.
	 */
	wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET, 0x8994);

	/* Mark suspended before cutting power so resume knows to restore */
	wm8994->suspended = true;

	ret = regulator_bulk_disable(wm8994->num_supplies,
				     wm8994->supplies);
	if (ret != 0) {
		dev_err(dev, "Failed to disable supplies: %d\n", ret);
		return ret;
	}

	return 0;
}
/*
 * wm8994_resume - system resume handler
 * @dev: the wm8994 core device
 *
 * Re-enables the supplies and restores the interrupt masks and the
 * GPIO/LDO registers stashed by wm8994_suspend().  A no-op if the
 * suspend was skipped.
 */
static int wm8994_resume(struct device *dev)
{
	struct wm8994 *wm8994 = dev_get_drvdata(dev);
	int ret, i;

	/* We may have lied to the PM core about suspending */
	if (!wm8994->suspended)
		return 0;

	ret = regulator_bulk_enable(wm8994->num_supplies,
				    wm8994->supplies);
	if (ret != 0) {
		dev_err(dev, "Failed to enable supplies: %d\n", ret);
		return ret;
	}

	/* Write register at a time as we use the cache on the CPU so store
	 * it in native endian.
	 */
	for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
		ret = wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK
				       + i, wm8994->irq_masks_cur[i]);
		if (ret < 0)
			dev_err(dev, "Failed to restore interrupt masks: %d\n",
				ret);
	}

	ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
			   &wm8994->ldo_regs);
	if (ret < 0)
		dev_err(dev, "Failed to restore LDO registers: %d\n", ret);

	ret = wm8994_write(wm8994, WM8994_GPIO_1, WM8994_NUM_GPIO_REGS * 2,
			   &wm8994->gpio_regs);
	if (ret < 0)
		dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);

	wm8994->suspended = false;

	return 0;
}
#endif
#ifdef CONFIG_REGULATOR
/* Return non-zero if the platform data declares consumers for LDO @ldo */
static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
{
	struct wm8994_ldo_pdata *ldo_pdata;

	if (!pdata)
		return 0;

	ldo_pdata = &pdata->ldo[ldo];

	if (!ldo_pdata->init_data)
		return 0;

	return ldo_pdata->init_data->num_consumer_supplies != 0;
}
#else
/* Without CONFIG_REGULATOR no LDO can have consumers */
static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
{
	return 0;
}
#endif
/* WM8994 register map: 16-bit addresses, 16-bit values */
static struct regmap_config wm8994_regmap_config = {
	.reg_bits = 16,
	.val_bits = 16,
};
/*
 * Instantiate the generic non-control parts of the device.
 *
 * Registers the LDO children, acquires and enables the supplies,
 * verifies the chip ID/revision, applies platform data defaults,
 * initializes the IRQ controller and adds the function children.
 * On failure it tears everything down and frees @wm8994 itself;
 * returns 0 on success or a negative errno.
 */
static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
	struct wm8994_pdata *pdata = wm8994->dev->platform_data;
	const char *devname;
	int ret, i;

	dev_set_drvdata(wm8994->dev, wm8994);

	/* Add the on-chip regulators first for bootstrapping */
	ret = mfd_add_devices(wm8994->dev, -1,
			      wm8994_regulator_devs,
			      ARRAY_SIZE(wm8994_regulator_devs),
			      NULL, 0);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
		goto err_regmap;
	}

	/* Number of supply rails depends on the chip variant */
	switch (wm8994->type) {
	case WM8994:
		wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
		break;
	case WM8958:
		wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies);
		break;
	default:
		BUG();
		goto err_regmap;
	}

	wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
				   wm8994->num_supplies,
				   GFP_KERNEL);
	if (!wm8994->supplies) {
		ret = -ENOMEM;
		goto err_regmap;
	}

	switch (wm8994->type) {
	case WM8994:
		for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
			wm8994->supplies[i].supply = wm8994_main_supplies[i];
		break;
	case WM8958:
		for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++)
			wm8994->supplies[i].supply = wm8958_main_supplies[i];
		break;
	default:
		BUG();
		goto err_regmap;
	}

	ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
				 wm8994->supplies);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
		goto err_supplies;
	}

	ret = regulator_bulk_enable(wm8994->num_supplies,
				    wm8994->supplies);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
		goto err_get;
	}

	/* The software reset register doubles as the chip ID */
	ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
	if (ret < 0) {
		dev_err(wm8994->dev, "Failed to read ID register\n");
		goto err_enable;
	}
	switch (ret) {
	case 0x8994:
		devname = "WM8994";
		if (wm8994->type != WM8994)
			dev_warn(wm8994->dev, "Device registered as type %d\n",
				 wm8994->type);
		/* Trust the hardware ID over the registered type */
		wm8994->type = WM8994;
		break;
	case 0x8958:
		devname = "WM8958";
		if (wm8994->type != WM8958)
			dev_warn(wm8994->dev, "Device registered as type %d\n",
				 wm8994->type);
		wm8994->type = WM8958;
		break;
	default:
		dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n",
			ret);
		ret = -EINVAL;
		goto err_enable;
	}

	ret = wm8994_reg_read(wm8994, WM8994_CHIP_REVISION);
	if (ret < 0) {
		dev_err(wm8994->dev, "Failed to read revision register: %d\n",
			ret);
		goto err_enable;
	}

	switch (wm8994->type) {
	case WM8994:
		switch (ret) {
		case 0:
		case 1:
			/* WM8994 revisions A and B have known limitations */
			dev_warn(wm8994->dev,
				 "revision %c not fully supported\n",
				 'A' + ret);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);

	if (pdata) {
		wm8994->irq_base = pdata->irq_base;
		wm8994->gpio_base = pdata->gpio_base;

		/* GPIO configuration is only applied if it's non-zero */
		for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
			if (pdata->gpio_defaults[i]) {
				wm8994_set_bits(wm8994, WM8994_GPIO_1 + i,
						0xffff,
						pdata->gpio_defaults[i]);
			}
		}
	}

	/* In some system designs where the regulators are not in use,
	 * we can achieve a small reduction in leakage currents by
	 * floating LDO outputs.  This bit makes no difference if the
	 * LDOs are enabled, it only affects cases where the LDOs were
	 * in operation and are then disabled.
	 */
	for (i = 0; i < WM8994_NUM_LDO_REGS; i++) {
		if (wm8994_ldo_in_use(pdata, i))
			wm8994_set_bits(wm8994, WM8994_LDO_1 + i,
					WM8994_LDO1_DISCH, WM8994_LDO1_DISCH);
		else
			wm8994_set_bits(wm8994, WM8994_LDO_1 + i,
					WM8994_LDO1_DISCH, 0);
	}

	wm8994_irq_init(wm8994);

	ret = mfd_add_devices(wm8994->dev, -1,
			      wm8994_devs, ARRAY_SIZE(wm8994_devs),
			      NULL, 0);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
		goto err_irq;
	}

	pm_runtime_enable(wm8994->dev);
	pm_runtime_resume(wm8994->dev);

	return 0;

	/* Unwind in reverse acquisition order; wm8994 is freed here on error */
err_irq:
	wm8994_irq_exit(wm8994);
err_enable:
	regulator_bulk_disable(wm8994->num_supplies,
			       wm8994->supplies);
err_get:
	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err_supplies:
	kfree(wm8994->supplies);
err_regmap:
	regmap_exit(wm8994->regmap);
	mfd_remove_devices(wm8994->dev);
	kfree(wm8994);
	return ret;
}
/* Tear down everything wm8994_device_init() set up and free @wm8994 */
static void wm8994_device_exit(struct wm8994 *wm8994)
{
	pm_runtime_disable(wm8994->dev);
	mfd_remove_devices(wm8994->dev);
	wm8994_irq_exit(wm8994);
	regulator_bulk_disable(wm8994->num_supplies,
			       wm8994->supplies);
	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
	kfree(wm8994->supplies);
	regmap_exit(wm8994->regmap);
	kfree(wm8994);
}
/* I2C probe: allocate the core structure, create the regmap, then hand
 * off to wm8994_device_init() (which owns wm8994 from that point on).
 */
static int wm8994_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm8994 *wm8994;
	int ret;

	wm8994 = kzalloc(sizeof(struct wm8994), GFP_KERNEL);
	if (wm8994 == NULL)
		return -ENOMEM;

	i2c_set_clientdata(i2c, wm8994);
	wm8994->dev = &i2c->dev;
	wm8994->irq = i2c->irq;
	wm8994->type = id->driver_data;	/* WM8994 or WM8958 from the id table */

	wm8994->regmap = regmap_init_i2c(i2c, &wm8994_regmap_config);
	if (IS_ERR(wm8994->regmap)) {
		ret = PTR_ERR(wm8994->regmap);
		dev_err(wm8994->dev, "Failed to allocate register map: %d\n",
			ret);
		kfree(wm8994);
		return ret;
	}

	return wm8994_device_init(wm8994, i2c->irq);
}
/* I2C remove: full device teardown (also frees the wm8994 structure). */
static int wm8994_i2c_remove(struct i2c_client *i2c)
{
	wm8994_device_exit(i2c_get_clientdata(i2c));

	return 0;
}
/* I2C device IDs; driver_data carries the chip variant */
static const struct i2c_device_id wm8994_i2c_id[] = {
	{ "wm8994", WM8994 },
	{ "wm8958", WM8958 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);

/* Same suspend/resume handlers for system sleep and runtime PM */
static UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume,
			    NULL);

static struct i2c_driver wm8994_i2c_driver = {
	.driver = {
		.name = "wm8994",
		.owner = THIS_MODULE,
		.pm = &wm8994_pm_ops,
	},
	.probe = wm8994_i2c_probe,
	.remove = wm8994_i2c_remove,
	.id_table = wm8994_i2c_id,
};
/* Module load: register the I2C driver, logging any failure. */
static int __init wm8994_i2c_init(void)
{
	int ret = i2c_add_driver(&wm8994_i2c_driver);

	if (ret)
		pr_err("Failed to register wm8994 I2C driver: %d\n", ret);

	return ret;
}
module_init(wm8994_i2c_init);
/* Module unload: unregister the I2C driver */
static void __exit wm8994_i2c_exit(void)
{
	i2c_del_driver(&wm8994_i2c_driver);
}
module_exit(wm8994_i2c_exit);
/* Module metadata */
MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
| gpl-2.0 |
Gabriel85/android_kernel_sony_apq8064 | drivers/gpu/msm/adreno_snapshot.c | 782 | 17499 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_snapshot.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
#include "adreno_cp_parser.h"
/* Number of dwords of ringbuffer history to record */
#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100

/* Maintain a list of the objects we see during parsing */

#define SNAPSHOT_OBJ_BUFSIZE 64

#define SNAPSHOT_OBJ_TYPE_IB 0

/* Keep track of how many bytes are frozen after a snapshot and tell the user */
static int snapshot_frozen_objsize;

/* A GPU buffer queued to be written into the static snapshot section */
static struct kgsl_snapshot_obj {
	int type;			/* SNAPSHOT_OBJ_TYPE_* */
	uint32_t gpuaddr;		/* GPU virtual address of the buffer */
	phys_addr_t ptbase;		/* pagetable the address is valid in */
	void *ptr;			/* kernel mapping of the buffer */
	int dwords;			/* buffer size in dwords */
	struct kgsl_mem_entry *entry;	/* backing memory entry */
} objbuf[SNAPSHOT_OBJ_BUFSIZE];

/* Pointer to the next open entry in the object list */
static int objbufptr;
/* Push a new buffer object onto the list of objects to dump statically */
static void push_object(struct kgsl_device *device, int type,
	phys_addr_t ptbase,
	uint32_t gpuaddr, int dwords)
{
	int index;
	void *ptr;
	struct kgsl_mem_entry *entry = NULL;

	/*
	 * Sometimes IBs can be reused in the same dump. Because we parse from
	 * oldest to newest, if we come across an IB that has already been used,
	 * assume that it has been reused and update the list with the newest
	 * size.
	 */
	for (index = 0; index < objbufptr; index++) {
		if (objbuf[index].gpuaddr == gpuaddr &&
			objbuf[index].ptbase == ptbase) {
			objbuf[index].dwords = dwords;
			return;
		}
	}

	/* Static list is full: drop the object rather than overflow it */
	if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) {
		KGSL_DRV_ERR(device, "snapshot: too many snapshot objects\n");
		return;
	}

	/*
	 * adreno_convertaddr verifies that the IB size is valid - at least in
	 * the context of it being smaller then the allocated memory space
	 */
	ptr = adreno_convertaddr(device, ptbase, gpuaddr, dwords << 2, &entry);

	if (ptr == NULL) {
		KGSL_DRV_ERR(device,
			"snapshot: Can't find GPU address for %x\n", gpuaddr);
		return;
	}

	/* Put it on the list of things to parse */
	objbuf[objbufptr].type = type;
	objbuf[objbufptr].gpuaddr = gpuaddr;
	objbuf[objbufptr].ptbase = ptbase;
	objbuf[objbufptr].dwords = dwords;
	objbuf[objbufptr].entry = entry;
	objbuf[objbufptr++].ptr = ptr;
}
/*
 * Return 1 when an entry with the same type, GPU address and pagetable
 * base is already on the static object list, 0 otherwise.
 */
static int find_object(int type, unsigned int gpuaddr, phys_addr_t ptbase)
{
	int i;

	for (i = 0; i < objbufptr; i++) {
		struct kgsl_snapshot_obj *obj = &objbuf[i];

		if (obj->type == type && obj->gpuaddr == gpuaddr &&
		    obj->ptbase == ptbase)
			return 1;
	}

	return 0;
}
/*
 * snapshot_freeze_obj_list() - Take a list of ib objects and freeze their
 * memory for snapshot
 * @device: Device being snapshotted
 * @ptbase: The pagetable base of the process to which IB belongs
 * @ib_obj_list: List of the IB objects
 *
 * Returns 0 on success else error code
 */
static int snapshot_freeze_obj_list(struct kgsl_device *device,
		phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list)
{
	int ret = 0;
	struct adreno_ib_object *ib_objs;
	unsigned int ib2base;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BASE, &ib2base);

	for (i = 0; i < ib_obj_list->num_objs; i++) {
		int temp_ret;
		int index;
		int freeze = 1;

		ib_objs = &(ib_obj_list->obj_list[i]);

		/* Make sure this object is not going to be saved statically */
		for (index = 0; index < objbufptr; index++) {
			if ((objbuf[index].gpuaddr <= ib_objs->gpuaddr) &&
				((objbuf[index].gpuaddr +
				(objbuf[index].dwords << 2)) >=
				(ib_objs->gpuaddr + ib_objs->size)) &&
				(objbuf[index].ptbase == ptbase)) {
				freeze = 0;
				break;
			}
		}

		if (freeze) {
			/* Save current IB2 statically */
			if (ib2base == ib_objs->gpuaddr) {
				push_object(device, SNAPSHOT_OBJ_TYPE_IB,
				ptbase, ib_objs->gpuaddr, ib_objs->size >> 2);
			} else {
				/* Freeze the backing memory; remember the
				 * first error but keep processing the rest */
				temp_ret = kgsl_snapshot_get_object(device,
					ptbase, ib_objs->gpuaddr, ib_objs->size,
					ib_objs->snapshot_obj_type);
				if (temp_ret < 0) {
					if (ret >= 0)
						ret = temp_ret;
				} else {
					snapshot_frozen_objsize += temp_ret;
				}
			}
		}
	}
	return ret;
}
/*
 * We want to store the last executed IB1 and IB2 in the static region to ensure
 * that we get at least some information out of the snapshot even if we can't
 * access the dynamic data from the sysfs file.  Push all other IBs on the
 * dynamic list
 */
static inline int parse_ib(struct kgsl_device *device, phys_addr_t ptbase,
		unsigned int gpuaddr, unsigned int dwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int ib1base;
	int ret = 0;
	struct adreno_ib_object_list *ib_obj_list;

	/*
	 * Check the IB address - if it is either the last executed IB1
	 * then push it into the static blob otherwise put it in the dynamic
	 * list
	 */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ib1base);
	if (gpuaddr == ib1base) {
		push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
			gpuaddr, dwords);
		goto done;
	}

	/* Already queued for a dynamic dump - nothing more to do */
	if (kgsl_snapshot_have_object(device, ptbase, gpuaddr, dwords << 2))
		goto done;

	ret = adreno_ib_create_object_list(device, ptbase,
		gpuaddr, dwords, &ib_obj_list);
	if (ret)
		goto done;

	ret = kgsl_snapshot_add_ib_obj_list(device, ptbase, ib_obj_list);

	/* The list is only kept if it was successfully queued */
	if (ret)
		adreno_ib_destroy_obj_list(ib_obj_list);

done:
	return ret;
}
/* Snapshot the ringbuffer memory.  Copies the whole ringbuffer into the
 * snapshot section and, within the window between the last context
 * switch and the read pointer, queues every referenced IB for dumping.
 * Returns the number of bytes written, or 0 if @remain is too small.
 */
static int snapshot_rb(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct kgsl_snapshot_rb *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	unsigned int rptr, *rbptr, ibbase;
	phys_addr_t ptbase;
	int index, size, i;
	int parse_ibs = 0, ib_parse_start;

	/* Get the physical address of the MMU pagetable */
	ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);

	/* Get the current read pointers for the RB */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);

	/* Address of the last processed IB */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ibbase);

	/*
	 * Figure out the window of ringbuffer data to dump.  First we need to
	 * find where the last processed IB was submitted.  Start walking back
	 * from the rptr
	 */
	index = rptr;
	rbptr = rb->buffer_desc.hostptr;

	do {
		index--;

		if (index < 0) {
			index = rb->sizedwords - 3;

			/* We wrapped without finding what we wanted */
			if (index < rb->wptr) {
				index = rb->wptr;
				break;
			}
		}

		if (adreno_cmd_is_ib(rbptr[index]) &&
			rbptr[index + 1] == ibbase)
			break;
	} while (index != rb->wptr);

	/*
	 * index points at the last submitted IB. We can only trust that the
	 * memory between the context switch and the hanging IB is valid, so
	 * the next step is to find the context switch before the submission
	 */

	while (index != rb->wptr) {
		index--;

		if (index < 0) {
			index = rb->sizedwords - 2;

			/*
			 * Wrapped without finding the context switch. This is
			 * harmless - we should still have enough data to dump a
			 * valid state
			 */

			if (index < rb->wptr) {
				index = rb->wptr;
				break;
			}
		}

		/* Break if the current packet is a context switch identifier */
		if ((rbptr[index] == cp_nop_packet(1)) &&
			(rbptr[index + 1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER))
			break;
	}

	/*
	 * Index represents the start of the window of interest.  We will try
	 * to dump all buffers between here and the rptr
	 */

	ib_parse_start = index;

	/*
	 * Dump the entire ringbuffer - the parser can choose how much of it to
	 * process
	 */

	size = (rb->sizedwords << 2);

	if (remain < size + sizeof(*header)) {
		KGSL_DRV_ERR(device,
			"snapshot: Not enough memory for the rb section");
		return 0;
	}

	/* Write the sub-header for the section */

	header->start = rb->wptr;
	header->end = rb->wptr;
	header->wptr = rb->wptr;
	header->rbsize = rb->sizedwords;
	header->count = rb->sizedwords;

	/*
	 * Loop through the RB, copying the data and looking for indirect
	 * buffers and MMU pagetable changes
	 */

	index = rb->wptr;
	for (i = 0; i < rb->sizedwords; i++) {
		*data = rbptr[index];

		/*
		 * Only parse IBs between the start and the rptr or the next
		 * context switch, whichever comes first
		 */

		if (parse_ibs == 0 && index == ib_parse_start)
			parse_ibs = 1;
		else if (index == rptr || adreno_rb_ctxtswitch(&rbptr[index]))
			parse_ibs = 0;

		if (parse_ibs && adreno_cmd_is_ib(rbptr[index])) {
			unsigned int ibaddr = rbptr[index + 1];
			unsigned int ibsize = rbptr[index + 2];

			/*
			 * This will return non NULL if the IB happens to be
			 * part of the context memory (i.e - context switch
			 * command buffers)
			 */

			struct kgsl_memdesc *memdesc =
				adreno_find_ctxtmem(device, ptbase, ibaddr,
					ibsize << 2);

			/* IOMMU uses a NOP IB placed in setstate memory */

			if (NULL == memdesc)
				if (kgsl_gpuaddr_in_memdesc(
						&device->mmu.setstate_memory,
						ibaddr, ibsize << 2))
					memdesc = &device->mmu.setstate_memory;
			/*
			 * The IB from CP_IB1_BASE and the IBs for legacy
			 * context switch go into the snapshot all
			 * others get marked as GPU objects
			 */

			if (memdesc != NULL)
				push_object(device, SNAPSHOT_OBJ_TYPE_IB,
					ptbase, ibaddr, ibsize);
			else
				parse_ib(device, ptbase, ibaddr, ibsize);
		}

		index = index + 1;

		if (index == rb->sizedwords)
			index = 0;

		data++;
	}

	/* Return the size of the section */
	return size + sizeof(*header);
}
/* Dump the memory objects owned by the currently active process as
 * (gpuaddr, size, memtype) triples so that replay tooling can rebuild
 * the GPU address space.  Returns bytes written or 0 on failure.
 */
static int snapshot_capture_mem_list(struct kgsl_device *device, void *snapshot,
			int remain, void *priv)
{
	struct kgsl_snapshot_replay_mem_list *header = snapshot;
	struct kgsl_process_private *private = NULL;
	struct kgsl_process_private *tmp_private;
	phys_addr_t ptbase;
	struct rb_node *node;
	struct kgsl_mem_entry *entry = NULL;
	int num_mem;
	unsigned int *data = snapshot + sizeof(*header);

	/* Find the process whose pagetable is currently active on the GPU */
	ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(tmp_private, &kgsl_driver.process_list, list) {
		if (kgsl_mmu_pt_equal(&device->mmu, tmp_private->pagetable,
			ptbase)) {
			private = tmp_private;
			break;
		}
	}
	mutex_unlock(&kgsl_driver.process_mutex);
	if (!private) {
		KGSL_DRV_ERR(device,
		"Failed to get pointer to process private structure\n");
		return 0;
	}
	/* We need to know the number of memory objects that the process has */
	spin_lock(&private->mem_lock);
	for (node = rb_first(&private->mem_rb), num_mem = 0; node; ) {
		entry = rb_entry(node, struct kgsl_mem_entry, node);
		node = rb_next(&entry->node);
		num_mem++;
	}

	if (remain < ((num_mem * 3 * sizeof(unsigned int)) +
			sizeof(*header))) {
		KGSL_DRV_ERR(device,
			"snapshot: Not enough memory for the mem list section");
		spin_unlock(&private->mem_lock);
		return 0;
	}
	header->num_entries = num_mem;
	header->ptbase = (__u32)ptbase;
	/*
	 * Walk through the memory list and store the
	 * tuples(gpuaddr, size, memtype) in snapshot
	 */
	for (node = rb_first(&private->mem_rb); node; ) {
		entry = rb_entry(node, struct kgsl_mem_entry, node);
		node = rb_next(&entry->node);

		*data++ = entry->memdesc.gpuaddr;
		*data++ = entry->memdesc.size;
		*data++ = (entry->memdesc.priv & KGSL_MEMTYPE_MASK) >>
							KGSL_MEMTYPE_SHIFT;
	}
	spin_unlock(&private->mem_lock);
	return sizeof(*header) + (num_mem * 3 * sizeof(unsigned int));
}
/*
 * snapshot_ib - snapshot the memory for an indirect buffer (IB).
 * @device: KGSL device being snapshotted
 * @snapshot: pointer to the start of the section buffer
 * @remain: bytes left in the snapshot buffer
 * @priv: the struct kgsl_snapshot_obj describing the IB
 *
 * Copies the raw IB contents into the snapshot; for the active IB1 it also
 * walks the IB to freeze the GPU objects it references. Returns the number
 * of bytes written, or 0 if the buffer is too small.
 */
static int snapshot_ib(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct kgsl_snapshot_ib *header = snapshot;
	struct kgsl_snapshot_obj *obj = priv;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *src = obj->ptr;
	unsigned int *dst = snapshot + sizeof(*header);
	struct adreno_ib_object_list *ib_obj_list;
	unsigned int ib1base;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ib1base);

	if (remain < (obj->dwords << 2) + sizeof(*header)) {
		/* was missing the trailing newline */
		KGSL_DRV_ERR(device,
			"snapshot: Not enough memory for the ib section\n");
		return 0;
	}

	/* only do this for IB1 because the IB2's are part of IB1 objects */
	if (ib1base == obj->gpuaddr) {
		if (!adreno_ib_create_object_list(device, obj->ptbase,
				obj->gpuaddr, obj->dwords,
				&ib_obj_list)) {
			/* freeze the IB objects in the IB */
			snapshot_freeze_obj_list(device, obj->ptbase,
				ib_obj_list);
			adreno_ib_destroy_obj_list(ib_obj_list);
		}
	}

	/* Write the sub-header for the section */
	header->gpuaddr = obj->gpuaddr;
	header->ptbase = (__u32)obj->ptbase;
	header->size = obj->dwords;

	/* Write the contents of the ib */
	memcpy((void *)dst, (void *)src, obj->dwords << 2);

	/* Return the size of the section (was a copy-pasted comment) */
	return (obj->dwords << 2) + sizeof(*header);
}
/* Dump the next pending object on the snapshot list into the buffer */
static void *dump_object(struct kgsl_device *device, int obj, void *snapshot,
	int *remain)
{
	/* IBs are the only object type currently queued for dumping */
	if (objbuf[obj].type == SNAPSHOT_OBJ_TYPE_IB) {
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_IB, snapshot, remain,
			snapshot_ib, &objbuf[obj]);
		/* Drop the mapping/reference taken when the object was pushed */
		if (objbuf[obj].entry) {
			kgsl_memdesc_unmap(&(objbuf[obj].entry->memdesc));
			kgsl_mem_entry_put(objbuf[obj].entry);
		}
	} else {
		KGSL_DRV_ERR(device,
			"snapshot: Invalid snapshot object type: %d\n",
			objbuf[obj].type);
	}

	return snapshot;
}
/* adreno_snapshot - Snapshot the Adreno GPU state
 * @device - KGSL device to snapshot
 * @snapshot - Pointer to the start of memory to write into
 * @remain - A pointer to how many bytes of memory are remaining in the snapshot
 * @hang - set if this snapshot was automatically triggered by a GPU hang
 * This is a hook function called by kgsl_snapshot to snapshot the
 * Adreno specific information for the GPU snapshot. In turn, this function
 * calls the GPU specific snapshot function to get core specific information.
 * Returns the updated write pointer into the snapshot buffer.
 */
void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
	int hang)
{
	int i;
	uint32_t ibbase, ibsize;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	phys_addr_t ptbase;

	/* Reset the list of objects (filled in as the RB/IBs are parsed) */
	objbufptr = 0;

	snapshot_frozen_objsize = 0;

	/* Get the physical address of the MMU pagetable */
	ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);

	/* Dump the ringbuffer */
	snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_RB,
		snapshot, remain, snapshot_rb, NULL);

	/*
	 * Add a section that lists (gpuaddr, size, memtype) tuples of the
	 * hanging process
	 */
	snapshot = kgsl_snapshot_add_section(device,
		KGSL_SNAPSHOT_SECTION_MEMLIST, snapshot, remain,
		snapshot_capture_mem_list, NULL);

	/*
	 * Make sure that the last IB1 that was being executed is dumped.
	 * Since this was the last IB1 that was processed, we should have
	 * already added it to the list during the ringbuffer parse but we
	 * want to be double plus sure.
	 */
	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ibbase);
	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &ibsize);

	/*
	 * The problem is that IB size from the register is the unprocessed size
	 * of the buffer not the original size, so if we didn't catch this
	 * buffer being directly used in the RB, then we might not be able to
	 * dump the whole thing. Print a warning message so we can try to
	 * figure out how often this really happens.
	 */
	if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
		push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
			ibbase, ibsize);
		KGSL_DRV_ERR(device, "CP_IB1_BASE not found in the ringbuffer. "
			"Dumping %x dwords of the buffer.\n", ibsize);
	}

	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BASE, &ibbase);
	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ibsize);

	/*
	 * Add the last parsed IB2 to the list. The IB2 should be found as we
	 * parse the objects below, but we try to add it to the list first, so
	 * it too can be parsed. Don't print an error message in this case - if
	 * the IB2 is found during parsing, the list will be updated with the
	 * correct size.
	 */
	if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
		push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
			ibbase, ibsize);
	}

	/*
	 * Go through the list of found objects and dump each one. As the IBs
	 * are parsed, more objects might be found, and objbufptr will increase
	 */
	for (i = 0; i < objbufptr; i++)
		snapshot = dump_object(device, i, snapshot, remain);

	/* Add GPU specific sections - registers mainly, but other stuff too */
	if (adreno_dev->gpudev->snapshot)
		snapshot = adreno_dev->gpudev->snapshot(adreno_dev, snapshot,
			remain, hang);

	if (snapshot_frozen_objsize)
		KGSL_DRV_ERR(device, "GPU snapshot froze %dKb of GPU buffers\n",
			snapshot_frozen_objsize / 1024);

	/*
	 * Queue a work item that will save the IB data in snapshot into
	 * static memory to prevent loss of data due to overwriting of
	 * memory
	 */
	queue_work(device->work_queue, &device->snapshot_obj_ws);

	return snapshot;
}
| gpl-2.0 |
AOSPA-L/android_kernel_oneplus_msm8974 | drivers/scsi/arcmsr/arcmsr_hba.c | 782 | 99826 | /*
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr_hba.c
** BY : Nick Cheng
** Description: SCSI RAID Device Driver for
** ARECA RAID Host adapter
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
** E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(unsigned long pacb);
static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
/*
 * Midlayer change_queue_depth hook: clamp the requested depth to the
 * adapter's per-LUN limit and apply it. Returns the depth in effect,
 * or -EOPNOTSUPP for any reason other than the default one.
 */
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
	int queue_depth, int reason)
{
	int depth = queue_depth;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (depth > ARCMSR_MAX_CMD_PERLUN)
		depth = ARCMSR_MAX_CMD_PERLUN;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	return depth;
}
/* SCSI host template: advertises the driver's capabilities and entry
 * points to the SCSI midlayer. max_sectors/sg_tablesize may later be
 * raised per-adapter in arcmsr_alloc_ccb_pool() from firmware config. */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module = THIS_MODULE,
	.name = "ARCMSR ARECA SATA/SAS RAID Controller"
		ARCMSR_DRIVER_VERSION,
	.info = arcmsr_info,
	.queuecommand = arcmsr_queue_command,
	.eh_abort_handler = arcmsr_abort,
	.eh_bus_reset_handler = arcmsr_bus_reset,
	.bios_param = arcmsr_bios_param,
	.change_queue_depth = arcmsr_adjust_disk_queue_depth,
	.can_queue = ARCMSR_MAX_FREECCB_NUM,
	.this_id = ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = arcmsr_host_attrs,
};
/* PCI IDs of all supported Areca controllers (ARC11xx/12xx/13xx/16xx/1880) */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver registration: binds the probe/remove/shutdown callbacks
 * to the device IDs listed above. */
static struct pci_driver arcmsr_pci_driver = {
	.name = "arcmsr",
	.id_table = arcmsr_device_id_table,
	.probe = arcmsr_probe,
	.remove = arcmsr_remove,
	.shutdown = arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/
/*
 * Free the separately allocated message unit of a type B adapter.
 * Types A and C keep their message unit inside the mapped PCI BARs,
 * so there is nothing to release for them.
 */
static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
{
	if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
		dma_free_coherent(&acb->pdev->dev,
			sizeof(struct MessageUnit_B),
			acb->pmuB, acb->dma_coherent_handle_hbb_mu);
}
/*
 * arcmsr_remap_pciregion - ioremap the controller register BAR(s).
 * Type A maps BAR0; type B maps BAR0 and BAR2; type C maps BAR1 with
 * ioremap_nocache and acknowledges any message interrupt that is already
 * pending. Returns true on success, false if any mapping fails.
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			/* don't leak the first BAR mapping on partial failure */
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		/* ack a message interrupt left pending by the BIOS/firmware */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	}
	return true;
}
/* Undo the BAR mappings created by arcmsr_remap_pciregion() */
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		iounmap(acb->pmuA);
		break;
	case ACB_ADAPTER_TYPE_B:
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
		break;
	case ACB_ADAPTER_TYPE_C:
		iounmap(acb->pmuC);
		break;
	}
}
/*
 * IRQ handler registered with request_irq(); forwards to the shared
 * adapter interrupt routine. The intermediate handle_state local in the
 * original added nothing, so the result is returned directly.
 */
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	struct AdapterControlBlock *acb = dev_id;

	return arcmsr_interrupt(acb);
}
/*
 * BIOS geometry hook: prefer the geometry recorded in the on-disk
 * partition table; otherwise synthesize 64/32 (or 255/63 for disks
 * larger than 1024 cylinders). geom[] is {heads, sectors, cylinders}.
 */
static int arcmsr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int *geom)
{
	int heads = 64, sectors = 32, cylinders;
	unsigned char *ptable;	/* copy of the block device's partition table */

	ptable = scsi_bios_ptable(bdev);
	if (ptable) {
		int ret = scsi_partsize(ptable, capacity,
				&geom[2], &geom[0], &geom[1]);
		kfree(ptable);
		if (ret != -1)
			return ret;
	}

	/* fall back to a synthesized geometry */
	cylinders = (int)capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = (int)capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
/* Classify the controller family (A/B/C register layout) from its PCI ID */
static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
	u16 dev_id;

	pci_read_config_word(acb->pdev, PCI_DEVICE_ID, &dev_id);
	acb->dev_id = dev_id;
	switch (dev_id) {
	case 0x1880:
		acb->adapter_type = ACB_ADAPTER_TYPE_C;
		break;
	case 0x1201:
		acb->adapter_type = ACB_ADAPTER_TYPE_B;
		break;
	default:
		acb->adapter_type = ACB_ADAPTER_TYPE_A;
		break;
	}
}
/*
 * Wait for the type A firmware to acknowledge a message command.
 * Polls the outbound interrupt status for up to 2000 * 10ms = 20 seconds.
 * Returns true (and acks the interrupt) on success, false on timeout.
 */
static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int poll;

	for (poll = 0; poll < 2000; poll++) {
		if (!(readl(&reg->outbound_intstatus) &
				ARCMSR_MU_OUTBOUND_MESSAGE0_INT)) {
			msleep(10);
			continue;
		}
		/* acknowledge message0 before reporting success */
		writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
			&reg->outbound_intstatus);
		return true;
	}
	return false;
}
/*
 * Wait for the type B firmware to acknowledge a message command.
 * Polls the iop2drv doorbell for up to 2000 * 10ms = 20 seconds.
 * On success clears the doorbell, signals end-of-interrupt to the IOP
 * and returns true; returns false on timeout.
 */
static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int poll;

	for (poll = 0; poll < 2000; poll++) {
		if (!(readl(reg->iop2drv_doorbell)
				& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)) {
			msleep(10);
			continue;
		}
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
			reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
			reg->drv2iop_doorbell);
		return true;
	}
	return false;
}
/*
 * Wait for the type C firmware to acknowledge a message command.
 * Polls the outbound doorbell for up to 2000 * 10ms = 20 seconds.
 * Returns true (after clearing the doorbell) on success, false on timeout.
 */
static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	int remaining = 2000;

	while (remaining-- > 0) {
		if (readl(&phbcmu->outbound_doorbell) &
				ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			/* clear interrupt */
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&phbcmu->outbound_doorbell_clear);
			return true;
		}
		msleep(10);
	}
	return false;
}
/*
 * Ask the type A firmware to flush its write cache; retry up to 30 times.
 * The original printk used a backslash line continuation inside the string
 * literal, which spliced the next line's leading whitespace into the
 * logged message -- rebuilt as a single clean format string.
 */
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (arcmsr_hba_wait_msgint_ready(acb))
			break;
		retry_count--;
		printk(KERN_NOTICE
			"arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
			acb->host->host_no, retry_count);
	} while (retry_count != 0);
}
/*
 * Ask the type B firmware to flush its write cache; retry up to 30 times.
 * Fixes the backslash string continuation that embedded stray whitespace
 * in the logged message.
 */
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;

	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
	do {
		if (arcmsr_hbb_wait_msgint_ready(acb))
			break;
		retry_count--;
		printk(KERN_NOTICE
			"arcmsr%d: wait 'flush adapter cache' timeout, retry count down = %d\n",
			acb->host->host_no, retry_count);
	} while (retry_count != 0);
}
static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
{
struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
do {
if (arcmsr_hbc_wait_msgint_ready(pACB)) {
break;
} else {
retry_count--;
printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
}
} while (retry_count != 0);
return;
}
/* Route the cache-flush request to the controller-family helper */
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_flush_hba_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_flush_hbb_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_flush_hbc_cache(acb);
		break;
	}
}
/*
 * arcmsr_alloc_ccb_pool - allocate the DMA-coherent pool of command
 * control blocks (CCBs) and thread them onto acb->ccb_free_list.
 * Also sizes the host's max transfer/SG limits from the firmware
 * configuration and marks every target/lun as absent until the first
 * device map arrives. Returns 0 on success or -ENOMEM.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	dma_addr_t cdb_phyaddr;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version;

	/* every target/lun starts out "gone" until the device map says otherwise */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	/* config version >= 3 encodes the supported SG page shift in bits 8..15 */
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/* one CCB plus its variable-length SG list, rounded up to 32 bytes */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	ccb_tmp = dma_coherent;
	/* offset converting a CCB bus address back to its kernel virtual address */
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		/* type C firmware takes the full bus address; A/B take it shifted by 5 */
		ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5));
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		INIT_LIST_HEAD(&ccb_tmp->list);
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		/* advance both the virtual and the bus address by one slot */
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
	}
	return 0;
}
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
char *acb_dev_map = (char *)acb->device_map;
uint32_t __iomem *signature = (uint32_t __iomem*) (®->message_rwbuffer[0]);
char __iomem *devicemap = (char __iomem*) (®->message_rwbuffer[21]);
int target, lun;
struct scsi_device *psdev;
char diff;
atomic_inc(&acb->rq_map_token);
if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
diff = (*acb_dev_map)^readb(devicemap);
if (diff != 0) {
char temp;
*acb_dev_map = readb(devicemap);
temp =*acb_dev_map;
for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
if((temp & 0x01)==1 && (diff & 0x01) == 1) {
scsi_add_device(acb->host, 0, target, lun);
}else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
psdev = scsi_device_lookup(acb->host, 0, target, lun);
if (psdev != NULL ) {
scsi_remove_device(psdev);
scsi_device_put(psdev);
}
}
temp >>= 1;
diff >>= 1;
}
}
devicemap++;
acb_dev_map++;
}
}
break;
}
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
char *acb_dev_map = (char *)acb->device_map;
uint32_t __iomem *signature = (uint32_t __iomem*)(®->message_rwbuffer[0]);
char __iomem *devicemap = (char __iomem*)(®->message_rwbuffer[21]);
int target, lun;
struct scsi_device *psdev;
char diff;
atomic_inc(&acb->rq_map_token);
if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
diff = (*acb_dev_map)^readb(devicemap);
if (diff != 0) {
char temp;
*acb_dev_map = readb(devicemap);
temp =*acb_dev_map;
for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
if((temp & 0x01)==1 && (diff & 0x01) == 1) {
scsi_add_device(acb->host, 0, target, lun);
}else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
psdev = scsi_device_lookup(acb->host, 0, target, lun);
if (psdev != NULL ) {
scsi_remove_device(psdev);
scsi_device_put(psdev);
}
}
temp >>= 1;
diff >>= 1;
}
}
devicemap++;
acb_dev_map++;
}
}
}
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C *reg = acb->pmuC;
char *acb_dev_map = (char *)acb->device_map;
uint32_t __iomem *signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
char __iomem *devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
int target, lun;
struct scsi_device *psdev;
char diff;
atomic_inc(&acb->rq_map_token);
if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
diff = (*acb_dev_map)^readb(devicemap);
if (diff != 0) {
char temp;
*acb_dev_map = readb(devicemap);
temp = *acb_dev_map;
for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
scsi_add_device(acb->host, 0, target, lun);
} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
psdev = scsi_device_lookup(acb->host, 0, target, lun);
if (psdev != NULL) {
scsi_remove_device(psdev);
scsi_device_put(psdev);
}
}
temp >>= 1;
diff >>= 1;
}
}
devicemap++;
acb_dev_map++;
}
}
}
}
}
/*
 * arcmsr_probe - PCI probe entry point: bring one Areca adapter online.
 * Enables the PCI device, allocates the Scsi_Host (with the adapter
 * control block in hostdata), maps the register BAR(s), reads the
 * firmware configuration, allocates the CCB pool, initializes the IOP,
 * registers with the SCSI midlayer, requests the IRQ and arms the
 * device-map polling timer. Returns 0 on success or -ENODEV; the goto
 * ladder unwinds in reverse order of acquisition.
 */
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	/* prefer 64-bit DMA addressing, fall back to 32-bit */
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if(error){
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if(error){
			printk(KERN_WARNING
			       "scsi%d: No suitable DMA mask available\n",
			       host->host_no);
			goto scsi_host_release;
		}
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
	host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
	host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	/* message buffers start out cleared; adapter is running */
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	arcmsr_define_adapter_type(acb);
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto unmap_pci_region;
	}
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto free_hbb_mu;
	}
	arcmsr_iop_init(acb);
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto RAID_controller_stop;
	}
	error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
	if(error){
		goto scsi_host_remove;
	}
	host->irq = pdev->irq;
	scsi_scan_host(host);
	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, 16);
	acb->fw_flag = FW_NORMAL;
	/* periodic timer that keeps polling the firmware for device-map changes */
	init_timer(&acb->eternal_timer);
	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	acb->eternal_timer.data = (unsigned long) acb;
	acb->eternal_timer.function = &arcmsr_request_device_map;
	add_timer(&acb->eternal_timer);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	return 0;
	/*
	 * NOTE(review): if arcmsr_alloc_sysfs_attr() fails we fall through the
	 * unwind ladder below without deleting eternal_timer or freeing the
	 * IRQ requested above -- looks like a leak on this rare error path;
	 * confirm against later upstream versions of this driver.
	 */
	out_free_sysfs:
	scsi_host_remove:
	scsi_remove_host(host);
	RAID_controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_ccb_pool(acb);
	free_hbb_mu:
	arcmsr_free_hbb_mu(acb);
	unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
	pci_release_regs:
	pci_release_regions(pdev);
	scsi_host_release:
	scsi_host_put(host);
	pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}
/* Post "abort all outstanding commands" to a type A adapter and wait
 * for the firmware acknowledgement. Returns true on ack, false on timeout. */
static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (arcmsr_hba_wait_msgint_ready(acb))
		return true;

	printk(KERN_NOTICE
		"arcmsr%d: wait 'abort all outstanding command' timeout \n"
		, acb->host->host_no);
	return false;
}
/* Post "abort all outstanding commands" to a type B adapter and wait
 * for the firmware acknowledgement. Returns true on ack, false on timeout. */
static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
	if (arcmsr_hbb_wait_msgint_ready(acb))
		return true;

	printk(KERN_NOTICE
		"arcmsr%d: wait 'abort all outstanding command' timeout \n"
		, acb->host->host_no);
	return false;
}
static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
{
struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE
"arcmsr%d: wait 'abort all outstanding command' timeout \n"
, pACB->host->host_no);
return false;
}
return true;
}
/* Dispatch "abort all commands" to the controller-family helper.
 * Returns the helper's true/false result (0 for an unknown type). */
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return arcmsr_abort_hba_allcmd(acb);
	case ACB_ADAPTER_TYPE_B:
		return arcmsr_abort_hbb_allcmd(acb);
	case ACB_ADAPTER_TYPE_C:
		return arcmsr_abort_hbc_allcmd(acb);
	}
	return 0;
}
/* Switch type B firmware into driver (non-BIOS) mode.
 * Returns true on firmware acknowledgement, false on timeout. */
static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
{
	struct MessageUnit_B *reg = pacb->pmuB;

	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (arcmsr_hbb_wait_msgint_ready(pacb))
		return true;

	printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
	return false;
}
/* Release the scatter-gather DMA mapping set up for the CCB's command.
 * The intermediate pcmd local added nothing and has been dropped. */
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	scsi_dma_unmap(ccb->pcmd);
}
/*
 * Finish a command: tear down its DMA mapping, return the CCB to the
 * free list (under ccblist_lock) and hand the result to the midlayer.
 */
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	unsigned long flags;
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *cmd = ccb->pcmd;

	atomic_dec(&acb->ccboutstandingcount);
	arcmsr_pci_unmap_dma(ccb);
	ccb->startdone = ARCMSR_CCB_DONE;

	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);

	cmd->scsi_done(cmd);
}
/*
 * Copy the firmware-provided sense data for a CHECK CONDITION into the
 * midlayer's sense buffer, never copying more than it can hold.
 */
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
	int sense_data_length;

	pcmd->result = DID_OK << 16;
	if (!sensebuffer)
		return;

	sense_data_length = sizeof(struct SENSE_DATA);
	if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
		sense_data_length = SCSI_SENSE_BUFFERSIZE;

	memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
	memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
	sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
	sensebuffer->Valid = 1;
}
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
u32 orig_mask = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A : {
struct MessageUnit_A __iomem *reg = acb->pmuA;
orig_mask = readl(®->outbound_intmask);
writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
®->outbound_intmask);
}
break;
case ACB_ADAPTER_TYPE_B : {
struct MessageUnit_B *reg = acb->pmuB;
orig_mask = readl(reg->iop2drv_doorbell_mask);
writel(0, reg->iop2drv_doorbell_mask);
}
break;
case ACB_ADAPTER_TYPE_C:{
struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
/* disable all outbound interrupt */
orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */
writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
}
break;
}
return orig_mask;
}
/*
 * Translate the firmware's DeviceStatus for a completed ccb into a
 * SCSI midlayer result, track per-target RAID presence in
 * acb->devstate[][], and complete the command.
 */
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
struct CommandControlBlock *ccb, bool error)
{
uint8_t id, lun;
id = ccb->pcmd->device->id;
lun = ccb->pcmd->device->lun;
if (!error) {
/* successful completion: a previously-gone target is back */
if (acb->devstate[id][lun] == ARECA_RAID_GONE)
acb->devstate[id][lun] = ARECA_RAID_GOOD;
ccb->pcmd->result = DID_OK << 16;
arcmsr_ccb_complete(ccb);
}else{
switch (ccb->arcmsr_cdb.DeviceStatus) {
case ARCMSR_DEV_SELECT_TIMEOUT: {
/* target did not answer selection: mark it gone */
acb->devstate[id][lun] = ARECA_RAID_GONE;
ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_ccb_complete(ccb);
}
break;
case ARCMSR_DEV_ABORTED:
case ARCMSR_DEV_INIT_FAIL: {
acb->devstate[id][lun] = ARECA_RAID_GONE;
ccb->pcmd->result = DID_BAD_TARGET << 16;
arcmsr_ccb_complete(ccb);
}
break;
case ARCMSR_DEV_CHECK_CONDITION: {
/* target is present; surface sense data to the midlayer */
acb->devstate[id][lun] = ARECA_RAID_GOOD;
arcmsr_report_sense_info(ccb);
arcmsr_ccb_complete(ccb);
}
break;
default:
printk(KERN_NOTICE
"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
but got unknown DeviceStatus = 0x%x \n"
, acb->host->host_no
, id
, lun
, ccb->arcmsr_cdb.DeviceStatus);
acb->devstate[id][lun] = ARECA_RAID_GONE;
ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_ccb_complete(ccb);
break;
}
}
}
/*
 * Route one completed ccb from the adapter's done queue to the proper
 * completion path.  Aborted commands get DID_ABORT; ccbs that do not
 * belong to this adapter or were never started are only logged.
 *
 * Fix: removed locals 'id' and 'lun', which were assigned from the
 * aborted command but never read (-Wunused-but-set-variable).
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
				acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
done acb = '0x%p'"
		"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
		" ccboutstandingcount = %d \n"
		, acb->host->host_no
		, acb
		, pCCB
		, pCCB->acb
		, pCCB->startdone
		, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}
/*
 * Drain and discard everything still sitting on the adapter's outbound
 * (done) queue after an abort, completing each drained ccb through
 * arcmsr_drain_donequeue().
 *
 * Fixes: restored "&reg->" register accesses corrupted by HTML-entity
 * mojibake, and removed the ACB_ADAPTER_TYPE_C re-declarations of
 * pARCMSR_CDB/flag_ccb/error/pCCB that shadowed the function-scope
 * variables (-Wshadow).
 */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			flag_ccb = readl(&reg->done_qbuffer[i]);
			if (flag_ccb != 0) {
				writel(0, &reg->done_qbuffer[i]);
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		uint32_t ccb_cdb_phy;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/* pop one completed entry from the outbound queue */
			flag_ccb = readl(&reg->outbound_queueport_low);
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	}
}
/*
 * PCI remove hook: detach from the SCSI midlayer, quiesce the adapter,
 * fail any commands still outstanding, then release the IRQ, DMA
 * pools, MMIO mappings and the PCI device.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
flush_work_sync(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
/* give outstanding commands a bounded amount of time to drain */
for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
msleep(25);
}
/* anything still pending is force-aborted and completed DID_ABORT */
if (atomic_read(&acb->ccboutstandingcount)) {
int i;
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
ccb->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(ccb);
}
}
}
free_irq(pdev->irq, acb);
arcmsr_free_ccb_pool(acb);
arcmsr_free_hbb_mu(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
/* PCI shutdown hook: quiesce the adapter before power-off/reboot. */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct AdapterControlBlock *pacb =
		(struct AdapterControlBlock *)shost->hostdata;

	del_timer_sync(&pacb->eternal_timer);
	arcmsr_disable_outbound_ints(pacb);
	flush_work_sync(&pacb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(pacb);
	arcmsr_flush_adapter_cache(pacb);
}
/* Module entry point: register the arcmsr PCI driver. */
static int arcmsr_module_init(void)
{
	return pci_register_driver(&arcmsr_pci_driver);
}
/* Module exit point: unregister the arcmsr PCI driver. */
static void arcmsr_module_exit(void)
{
pci_unregister_driver(&arcmsr_pci_driver);
}
/* module load/unload entry points */
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
/*
 * Re-enable the outbound interrupt sources needed for normal
 * operation, based on the mask previously saved by
 * arcmsr_disable_outbound_ints(), and record the active enable bits
 * in acb->outbound_int_enable.
 *
 * Fix: restored "&reg->" register accesses corrupted by HTML-entity
 * mojibake ("&reg" rendered as the registered-trademark sign).
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	}
}
/*
 * Fill in the ARCMSR_CDB for a SCSI command, including its DMA
 * scatter/gather list.  Returns SUCCESS, or FAILED when the s/g
 * mapping fails or exceeds the host's sg_tablesize.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
int8_t *psge = (int8_t *)&arcmsr_cdb->u;
__le32 address_lo, address_hi;
int arccdbsize = 0x30;
__le32 length = 0;
int i;
struct scatterlist *sg;
int nseg;
ccb->pcmd = pcmd;
memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
arcmsr_cdb->TargetID = pcmd->device->id;
arcmsr_cdb->LUN = pcmd->device->lun;
arcmsr_cdb->Function = 1;
arcmsr_cdb->Context = 0;
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
nseg = scsi_dma_map(pcmd);
if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
return FAILED;
scsi_for_each_sg(pcmd, sg, nseg, i) {
/* Get the physical address of the current data pointer */
length = cpu_to_le32(sg_dma_len(sg));
address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
if (address_hi == 0) {
/* 32-bit DMA address: compact SG32 entry */
struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
pdma_sg->address = address_lo;
pdma_sg->length = length;
psge += sizeof (struct SG32ENTRY);
arccdbsize += sizeof (struct SG32ENTRY);
} else {
/* 64-bit DMA address: SG64 entry, flagged via IS_SG64_ADDR */
struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
pdma_sg->addresshigh = address_hi;
pdma_sg->address = address_lo;
pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
psge += sizeof (struct SG64ENTRY);
arccdbsize += sizeof (struct SG64ENTRY);
}
}
arcmsr_cdb->sgcount = (uint8_t)nseg;
arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
/* number of 256-byte pages the frame occupies, rounded up */
arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
if ( arccdbsize > 256)
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
if (pcmd->sc_data_direction == DMA_TO_DEVICE)
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
ccb->arc_cdb_size = arccdbsize;
return SUCCESS;
}
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
atomic_inc(&acb->ccboutstandingcount);
ccb->startdone = ARCMSR_CCB_START;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
®->inbound_queueport);
else {
writel(cdb_phyaddr_pattern, ®->inbound_queueport);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
uint32_t ending_index, index = reg->postq_index;
ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
writel(0, ®->post_qbuffer[ending_index]);
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
®->post_qbuffer[index]);
} else {
writel(cdb_phyaddr_pattern, ®->post_qbuffer[index]);
}
index++;
index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
reg->postq_index = index;
writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
}
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
uint32_t ccb_post_stamp, arc_cdb_size;
arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
if (acb->cdb_phyaddr_hi32) {
writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
} else {
writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
}
}
}
}
/*
 * Tell an HBA adapter to stop background rebuild activity.
 * Fix: restored "&reg->" corrupted by HTML-entity mojibake.
 */
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
			, acb->host->host_no);
	}
}
/* Tell an HBB adapter to stop background rebuild activity. */
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *mu = acb->pmuB;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, mu->drv2iop_doorbell);
	if (arcmsr_hbb_wait_msgint_ready(acb))
		return;
	printk(KERN_NOTICE
		"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
		, acb->host->host_no);
}
static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
{
struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE
"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
, pACB->host->host_no);
}
return;
}
/* Dispatch "stop background rebuild" to the adapter-specific helper. */
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_stop_hba_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_stop_hbb_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_stop_hbc_bgrb(acb);
		break;
	}
}
/* Release the DMA-coherent area that backs the ccb pool. */
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
/*
 * Acknowledge to the firmware that the driver has consumed the data
 * in the IOP-to-driver message queue buffer.
 *
 * Fix: restored "&reg->" register accesses corrupted by HTML-entity
 * mojibake.
 */
void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	}
}
/*
 * Signal the firmware that the driver has finished writing data into
 * the driver-to-IOP message queue buffer; the firmware replies on the
 * next hardware interrupt so the next Qbuffer can be posted.
 *
 * Fix: restored "&reg->" register accesses corrupted by HTML-entity
 * mojibake.
 */
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	}
}
/*
 * Return a pointer to the adapter's IOP-to-driver (read) message
 * queue buffer inside the message unit.
 *
 * Fix: restored "&reg->" corrupted by HTML-entity mojibake in the
 * TYPE_A case.
 */
struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *qbuffer = NULL;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
		}
		break;
	}
	return qbuffer;
}
/*
 * Return a pointer to the adapter's driver-to-IOP (write) message
 * queue buffer inside the message unit.
 *
 * Fix: restored "&reg->" corrupted by HTML-entity mojibake in the
 * TYPE_A and TYPE_C cases.
 */
static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *pqbuffer = NULL;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
		break;
	}
	return pqbuffer;
}
/*
 * Firmware wrote ioctl data for the driver: copy it from the IOP's
 * queue buffer into the driver's rqbuffer ring if it fits; otherwise
 * flag ACB_F_IOPDATA_OVERFLOW and leave the data in the IOP until a
 * reader drains the ring.
 */
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
struct QBUFFER __iomem *prbuffer;
struct QBUFFER *pQbuffer;
uint8_t __iomem *iop_data;
int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
rqbuf_lastindex = acb->rqbuf_lastindex;
rqbuf_firstindex = acb->rqbuf_firstindex;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = prbuffer->data_len;
/* free space in the ring (power-of-two sized, one slot kept empty) */
my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
if (my_empty_len >= iop_len)
{
/* NOTE(review): byte-wise memcpy straight from an __iomem buffer;
 * presumably acceptable on the supported platforms -- confirm */
while (iop_len > 0) {
pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
memcpy(pQbuffer, iop_data, 1);
rqbuf_lastindex++;
rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
iop_len--;
}
acb->rqbuf_lastindex = rqbuf_lastindex;
/* ack so the firmware can post the next buffer */
arcmsr_iop_message_read(acb);
}
else {
acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
}
}
/*
 * Firmware consumed the previous driver-to-IOP buffer: if more ioctl
 * data is queued in wqbuffer, push the next chunk (up to 124 bytes)
 * to the IOP; once the ring is empty, mark the write queue cleared.
 */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
uint8_t *pQbuffer;
struct QBUFFER __iomem *pwbuffer;
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
iop_data = (uint8_t __iomem *)pwbuffer->data;
/* 124 is the per-transfer payload limit of the message buffer */
while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
(allxfer_len < 124)) {
pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
memcpy(iop_data, pQbuffer, 1);
acb->wqbuf_firstindex++;
acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
allxfer_len++;
}
pwbuffer->data_len = allxfer_len;
arcmsr_iop_message_wrote(acb);
}
if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
}
}
/*
 * HBA doorbell interrupt: the firmware signalled message-queue data
 * movement; writing the read value back clears the doorbell bits.
 *
 * Fix: restored "&reg->" corrupted by HTML-entity mojibake.
 */
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell);/*clear interrupt*/
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
}
static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
{
uint32_t outbound_doorbell;
struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
/*
*******************************************************************
** Maybe here we need to check wrqbuffer_lock is lock or not
** DOORBELL: din! don!
** check if there are any mail need to pack from firmware
*******************************************************************
*/
outbound_doorbell = readl(®->outbound_doorbell);
writel(outbound_doorbell, ®->outbound_doorbell_clear);/*clear interrupt*/
if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(pACB);
}
if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
arcmsr_iop2drv_data_read_handle(pACB);
}
if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
arcmsr_hbc_message_isr(pACB); /* messenger of "driver to iop commands" */
}
return;
}
/*
 * HBA post-queue interrupt: pop completed command frames from the
 * outbound queue port until the 0xFFFFFFFF empty marker is read.
 *
 * Fix: restored "&reg->" corrupted by HTML-entity mojibake.
 */
static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}
/*
 * HBB post-queue interrupt: walk the done_qbuffer ring from
 * doneq_index, completing and clearing each non-zero entry.
 *
 * Fix: restored "&reg->" corrupted by HTML-entity mojibake.
 */
static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	index = reg->doneq_index;
	while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
		writel(0, &reg->done_qbuffer[index]);
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->doneq_index = index;
	}
}
/*
 * HBC post-queue interrupt: pop completed command frames from the
 * outbound queue while the post-queue status bit stays set, with a
 * throttling notification to the firmware after
 * ARCMSR_HBC_ISR_THROTTLING_LEVEL completions.
 */
static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
{
struct MessageUnit_C *phbcmu;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *ccb;
uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
int error;
phbcmu = (struct MessageUnit_C *)acb->pmuC;
/* areca cdb command done */
/* Use correct offset and size for syncing */
while (readl(&phbcmu->host_int_status) &
ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
/* check if command done with no error*/
flag_ccb = readl(&phbcmu->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
/* low nibble of the queue entry carries the error-mode flag */
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
/* check if command done with no error */
arcmsr_drain_donequeue(acb, ccb, error);
if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
/* tell the firmware to pace further postings, then yield */
writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
break;
}
throttling++;
}
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
{
struct MessageUnit_A *reg = acb->pmuA;
/*clear interrupt and message state*/
writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus);
schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
 * HBB message interrupt: ack the doorbell and defer the work to the
 * message bottom half.
 */
static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *mu = acb->pmuB;

	/* clear interrupt and message state */
	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, mu->iop2drv_doorbell);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the
** current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
{
struct MessageUnit_C *reg = acb->pmuC;
/*clear interrupt and message state*/
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);
schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
 * Service all pending interrupt sources on an HBA adapter.  Returns 1
 * when none of the handled bits are pending (the shared IRQ belongs
 * to another device), 0 when handled.
 *
 * Fix: restored "&reg->" corrupted by HTML-entity mojibake.
 */
static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
		return 1;
	}
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		arcmsr_hba_doorbell_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hba_postqueue_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
		/* messenger of "driver to iop commands" */
		arcmsr_hba_message_isr(acb);
	}
	return 0;
}
/*
 * Service all pending interrupt sources on an HBB adapter.  Returns 1
 * when no enabled doorbell bits are pending (shared IRQ belongs to
 * another device), 0 when handled.
 */
static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_doorbell;
struct MessageUnit_B *reg = acb->pmuB;
outbound_doorbell = readl(reg->iop2drv_doorbell) &
acb->outbound_int_enable;
if (!outbound_doorbell)
return 1;
/* ack exactly the bits we saw */
writel(~outbound_doorbell, reg->iop2drv_doorbell);
/*in case the last action of doorbell interrupt clearance is cached,
this action can push HW to write down the clear bit*/
readl(reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
arcmsr_iop2drv_data_read_handle(acb);
}
if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
arcmsr_hbb_postqueue_isr(acb);
}
if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
/* messenger of "driver to iop commands" */
arcmsr_hbb_message_isr(acb);
}
return 0;
}
/*
 * Service all pending interrupt sources on an HBC adapter.  Returns 1
 * when the host interrupt status is clear (shared IRQ belongs to
 * another device), 0 when handled.
 */
static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
{
uint32_t host_interrupt_status;
struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
/*
*********************************************
** check outbound intstatus
*********************************************
*/
host_interrupt_status = readl(&phbcmu->host_int_status);
if (!host_interrupt_status) {
/*it must be share irq*/
return 1;
}
/* MU ioctl transfer doorbell interrupts*/
if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
arcmsr_hbc_doorbell_isr(pACB); /* messenger of "ioctl message read write" */
}
/* MU post queue interrupts*/
if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
arcmsr_hbc_postqueue_isr(pACB); /* messenger of "scsi commands" */
}
return 0;
}
/*
 * Top-level interrupt dispatcher: hand off to the adapter-specific
 * handler and report IRQ_NONE when the interrupt was not ours.
 */
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	int not_ours = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		not_ours = arcmsr_handle_hba_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		not_ours = arcmsr_handle_hbb_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		not_ours = arcmsr_handle_hbc_isr(acb);
		break;
	}
	return not_ours ? IRQ_NONE : IRQ_HANDLED;
}
/*
 * Park the adapter: stop background rebuild and flush the cache with
 * outbound interrupts masked.  No-op unless rebuild is running.
 */
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;

	if (!acb || !(acb->acb_flags & ACB_F_MSG_START_BGRB))
		return;
	/* stop adapter background rebuild */
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_enable_outbound_ints(acb, intmask_org);
}
/*
 * Push up to 124 bytes of queued ioctl data from the driver's
 * wqbuffer ring to the IOP's write queue buffer and signal the
 * firmware.  Only runs once the IOP has acknowledged the previous
 * chunk (ACB_F_MESSAGE_WQBUFFER_READED set).
 */
void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
int32_t wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer;
struct QBUFFER __iomem *pwbuffer;
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
iop_data = (uint8_t __iomem *)pwbuffer->data;
if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
wqbuf_firstindex = acb->wqbuf_firstindex;
wqbuf_lastindex = acb->wqbuf_lastindex;
/* NOTE(review): byte-wise memcpy into an __iomem buffer;
 * presumably acceptable on the supported platforms -- confirm */
while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
memcpy(iop_data, pQbuffer, 1);
wqbuf_firstindex++;
wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
allxfer_len++;
}
acb->wqbuf_firstindex = wqbuf_firstindex;
pwbuffer->data_len = allxfer_len;
arcmsr_iop_message_wrote(acb);
}
}
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
struct scsi_cmnd *cmd)
{
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
int retvalue = 0, transfer_len = 0;
char *buffer;
struct scatterlist *sg;
uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
(uint32_t ) cmd->cmnd[6] << 16 |
(uint32_t ) cmd->cmnd[7] << 8 |
(uint32_t ) cmd->cmnd[8];
/* 4 bytes: Areca io control code */
sg = scsi_sglist(cmd);
buffer = kmap_atomic(sg_page(sg)) + sg->offset;
if (scsi_sg_count(cmd) > 1) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
transfer_len += sg->length;
if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
switch(controlcode) {
case ARCMSR_MESSAGE_READ_RQBUFFER: {
unsigned char *ver_addr;
uint8_t *pQbuffer, *ptmpQbuffer;
int32_t allxfer_len = 0;
ver_addr = kmalloc(1032, GFP_ATOMIC);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
ptmpQbuffer = ver_addr;
while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
&& (allxfer_len < 1031)) {
pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
memcpy(ptmpQbuffer, pQbuffer, 1);
acb->rqbuf_firstindex++;
acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
ptmpQbuffer++;
allxfer_len++;
}
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
struct QBUFFER __iomem *prbuffer;
uint8_t __iomem *iop_data;
int32_t iop_len;
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
iop_data = prbuffer->data;
iop_len = readl(&prbuffer->data_len);
while (iop_len > 0) {
acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
acb->rqbuf_lastindex++;
acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
iop_data++;
iop_len--;
}
arcmsr_iop_message_read(acb);
}
memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
pcmdmessagefld->cmdmessage.Length = allxfer_len;
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}else{
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
}
kfree(ver_addr);
}
break;
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
unsigned char *ver_addr;
int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer, *ptmpuserbuffer;
ver_addr = kmalloc(1032, GFP_ATOMIC);
if (!ver_addr) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}else{
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
}
ptmpuserbuffer = ver_addr;
user_len = pcmdmessagefld->cmdmessage.Length;
memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
wqbuf_lastindex = acb->wqbuf_lastindex;
wqbuf_firstindex = acb->wqbuf_firstindex;
if (wqbuf_lastindex != wqbuf_firstindex) {
struct SENSE_DATA *sensebuffer =
(struct SENSE_DATA *)cmd->sense_buffer;
arcmsr_post_ioctldata2iop(acb);
/* has error report sensedata */
sensebuffer->ErrorCode = 0x70;
sensebuffer->SenseKey = ILLEGAL_REQUEST;
sensebuffer->AdditionalSenseLength = 0x0A;
sensebuffer->AdditionalSenseCode = 0x20;
sensebuffer->Valid = 1;
retvalue = ARCMSR_MESSAGE_FAIL;
} else {
my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
&(ARCMSR_MAX_QBUFFER - 1);
if (my_empty_len >= user_len) {
while (user_len > 0) {
pQbuffer =
&acb->wqbuffer[acb->wqbuf_lastindex];
memcpy(pQbuffer, ptmpuserbuffer, 1);
acb->wqbuf_lastindex++;
acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
ptmpuserbuffer++;
user_len--;
}
if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
acb->acb_flags &=
~ACB_F_MESSAGE_WQBUFFER_CLEARED;
arcmsr_post_ioctldata2iop(acb);
}
} else {
/* has error report sensedata */
struct SENSE_DATA *sensebuffer =
(struct SENSE_DATA *)cmd->sense_buffer;
sensebuffer->ErrorCode = 0x70;
sensebuffer->SenseKey = ILLEGAL_REQUEST;
sensebuffer->AdditionalSenseLength = 0x0A;
sensebuffer->AdditionalSenseCode = 0x20;
sensebuffer->Valid = 1;
retvalue = ARCMSR_MESSAGE_FAIL;
}
}
kfree(ver_addr);
}
break;
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
uint8_t *pQbuffer = acb->rqbuffer;
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
}
acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
acb->rqbuf_firstindex = 0;
acb->rqbuf_lastindex = 0;
memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}else{
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
}
}
break;
case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
uint8_t *pQbuffer = acb->wqbuffer;
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}else{
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
}
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
}
acb->acb_flags |=
(ACB_F_MESSAGE_WQBUFFER_CLEARED |
ACB_F_MESSAGE_WQBUFFER_READED);
acb->wqbuf_firstindex = 0;
acb->wqbuf_lastindex = 0;
memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
}
break;
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
uint8_t *pQbuffer;
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
}
acb->acb_flags |=
(ACB_F_MESSAGE_WQBUFFER_CLEARED
| ACB_F_MESSAGE_RQBUFFER_CLEARED
| ACB_F_MESSAGE_WQBUFFER_READED);
acb->rqbuf_firstindex = 0;
acb->rqbuf_lastindex = 0;
acb->wqbuf_firstindex = 0;
acb->wqbuf_lastindex = 0;
pQbuffer = acb->rqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
pQbuffer = acb->wqbuffer;
memset(pQbuffer, 0, sizeof(struct QBUFFER));
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}else{
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
}
}
break;
case ARCMSR_MESSAGE_RETURN_CODE_3F: {
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}else{
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_3F;
}
break;
}
case ARCMSR_MESSAGE_SAY_HELLO: {
int8_t *hello_string = "Hello! I am ARCMSR";
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}else{
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_OK;
}
memcpy(pcmdmessagefld->messagedatabuffer, hello_string
, (int16_t)strlen(hello_string));
}
break;
case ARCMSR_MESSAGE_SAY_GOODBYE:
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}
arcmsr_iop_parking(acb);
break;
case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
if(acb->fw_flag == FW_DEADLOCK) {
pcmdmessagefld->cmdmessage.ReturnCode =
ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
}
arcmsr_flush_adapter_cache(acb);
break;
default:
retvalue = ARCMSR_MESSAGE_FAIL;
}
message_out:
sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset);
return retvalue;
}
/*
 * arcmsr_get_freeccb - pop one CommandControlBlock off the free list.
 * @acb: adapter whose free list is consulted.
 *
 * Returns the detached ccb, or NULL when the free list is empty (caller
 * then reports SCSI_MLQUEUE_HOST_BUSY).  ccblist_lock serializes against
 * the completion path, which returns ccbs to the list from IRQ context.
 *
 * Fixes: returned the integer literal 0 for the pointer (use NULL), and
 * the empty case duplicated the unlock in a second exit path.
 */
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head = &acb->ccb_free_list;
	struct CommandControlBlock *ccb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&acb->ccblist_lock, flags);
	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del_init(&ccb->list);
	}
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	return ccb;	/* NULL when the list was empty */
}
/*
 * arcmsr_handle_virtual_command - emulate SCSI commands for target id 16,
 * the virtual device used only for IOP message transfer.
 * @acb: owning adapter.
 * @cmd: the SCSI command to emulate; completed via cmd->scsi_done here.
 *
 * INQUIRY is answered synthetically, WRITE_BUFFER/READ_BUFFER are routed
 * to the IOP message channel, everything else completes with no data.
 */
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
					struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		/* only LUN 0 exists on the virtual device */
		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		/* Zero the whole response: previously bytes 3, 5-7 and
		 * 20-31 were left uninitialized, leaking stack contents
		 * into the INQUIRY data returned to the midlayer. */
		memset(inqdata, 0, sizeof(inqdata));
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		strncpy(&inqdata[8], "Areca ", 8);
		/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;
		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		cmd->scsi_done(cmd);
	}
}
/*
 * arcmsr_queue_command_lck - queuecommand body (wrapped by DEF_SCSI_QCMD).
 * Completes trivial commands inline, routes target 16 to the virtual
 * device handler, otherwise builds and posts a ccb to the IOP.
 */
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int id = cmd->device->id;
	int lun = cmd->device->lun;
	uint8_t opcode = cmd->cmnd[0];

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;

	/* These complete immediately; fail them if the volume is gone. */
	if (opcode == SYNCHRONIZE_CACHE || opcode == SEND_DIAGNOSTIC) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			cmd->result = (DID_NO_CONNECT << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
	/* id 16 is the virtual device reserved for IOP message transfer */
	if (id == 16) {
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	if (atomic_read(&acb->ccboutstandingcount) >= ARCMSR_MAX_OUTSTANDING_CMD)
		return SCSI_MLQUEUE_HOST_BUSY;
	ccb = arcmsr_get_freeccb(acb);
	if (ccb == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
/* DEF_SCSI_QCMD generates arcmsr_queue_command, wrapping the _lck body
 * above with the midlayer's host-lock handling boilerplate. */
static DEF_SCSI_QCMD(arcmsr_queue_command)
static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
char *acb_firm_model = acb->firm_model;
char *acb_firm_version = acb->firm_version;
char *acb_device_map = acb->device_map;
char __iomem *iop_firm_model = (char __iomem *)(®->message_rwbuffer[15]);
char __iomem *iop_firm_version = (char __iomem *)(®->message_rwbuffer[17]);
char __iomem *iop_device_map = (char __iomem *)(®->message_rwbuffer[21]);
int count;
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
if (!arcmsr_hba_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
return false;
}
count = 8;
while (count){
*acb_firm_model = readb(iop_firm_model);
acb_firm_model++;
iop_firm_model++;
count--;
}
count = 16;
while (count){
*acb_firm_version = readb(iop_firm_version);
acb_firm_version++;
iop_firm_version++;
count--;
}
count=16;
while(count){
*acb_device_map = readb(iop_device_map);
acb_device_map++;
iop_device_map++;
count--;
}
printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
acb->host->host_no,
acb->firm_version,
acb->firm_model);
acb->signature = readl(®->message_rwbuffer[0]);
acb->firm_request_len = readl(®->message_rwbuffer[1]);
acb->firm_numbers_queue = readl(®->message_rwbuffer[2]);
acb->firm_sdram_size = readl(®->message_rwbuffer[3]);
acb->firm_hd_channels = readl(®->message_rwbuffer[4]);
acb->firm_cfg_version = readl(®->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
return true;
}
/*
 * arcmsr_get_hbb_config - allocate the type B message unit in DMA-coherent
 * host memory, wire up its doorbell/buffer pointers into BAR0/BAR1, then
 * read firmware model/version/device-map and capability words.
 * Returns true on success, false on allocation failure or message timeout.
 *
 * Fix: every "&reg" had been corrupted to a mojibake "registered sign"
 * character (HTML-entity mangling); restored "&reg->".
 */
static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model;
	/*firm_model,15,60-67*/
	char __iomem *iop_firm_version;
	/*firm_version,17,68-83*/
	char __iomem *iop_device_map;
	/*firm_version,21,84-99*/
	int count;

	dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
		return false;
	}
	acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
	reg = (struct MessageUnit_B *)dma_coherent;
	acb->pmuB = reg;
	/* doorbells live in BAR0 (mem_base0), message buffers in BAR1 */
	reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
	reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
	reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
	reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
	reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
	reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
	reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
	iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
	iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		/* NOTE(review): the coherent buffer allocated above is not
		 * freed on this error path - confirm adapter teardown
		 * releases it via dma_coherent_handle_hbb_mu. */
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
		miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count){
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count){
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	count = 16;
	while(count){
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		acb->host->host_no,
		acb->firm_version,
		acb->firm_model);
	acb->signature = readl(&reg->message_rwbuffer[1]);
	/*firm_signature,1,00-03*/
	acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
	/*firm_request_len,1,04-07*/
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
	/*firm_numbers_queue,2,08-11*/
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
	/*firm_sdram_size,3,12-15*/
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
	/*firm_ide_channels,4,16-19*/
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
	/*firm_ide_channels,4,16-19*/
	return true;
}
static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
{
uint32_t intmask_org, Index, firmware_state = 0;
struct MessageUnit_C *reg = pACB->pmuC;
char *acb_firm_model = pACB->firm_model;
char *acb_firm_version = pACB->firm_version;
char *iop_firm_model = (char *)(®->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
char *iop_firm_version = (char *)(®->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
int count;
/* disable all outbound interrupt */
intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
/* wait firmware ready */
do {
firmware_state = readl(®->outbound_msgaddr1);
} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
/* wait message ready */
for (Index = 0; Index < 2000; Index++) {
if (readl(®->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);/*clear interrupt*/
break;
}
udelay(10);
} /*max 1 seconds*/
if (Index >= 2000) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
count = 8;
while (count) {
*acb_firm_model = readb(iop_firm_model);
acb_firm_model++;
iop_firm_model++;
count--;
}
count = 16;
while (count) {
*acb_firm_version = readb(iop_firm_version);
acb_firm_version++;
iop_firm_version++;
count--;
}
printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
pACB->host->host_no,
pACB->firm_version,
pACB->firm_model);
pACB->firm_request_len = readl(®->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
pACB->firm_numbers_queue = readl(®->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
pACB->firm_sdram_size = readl(®->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
pACB->firm_hd_channels = readl(®->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
pACB->firm_cfg_version = readl(®->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
/*all interrupt service will be enable at arcmsr_iop_init*/
return true;
}
/*
 * arcmsr_get_firmware_spec - dispatch to the adapter-family specific
 * "get firmware configuration" routine.  Returns that routine's result.
 */
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return arcmsr_get_hba_config(acb);
	case ACB_ADAPTER_TYPE_B:
		return arcmsr_get_hbb_config(acb);
	default:
		return arcmsr_get_hbc_config(acb);
	}
}
/*
 * arcmsr_polling_hba_ccbdone - poll a type A adapter's outbound queue
 * until @poll_ccb completes (SUCCESS) or ~100 retries elapse (FAILED).
 * Completes every ccb it drains along the way, reporting aborted or
 * stale ccbs appropriately.
 *
 * Fix: every "&reg" had been corrupted to a mojibake "registered sign"
 * character (HTML-entity mangling); restored "&reg->".
 */
static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF marks an empty outbound queue */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb carries the cdb physical frame index (32-byte units) */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
/*
 * arcmsr_polling_hbb_ccbdone - poll a type B adapter's done queue until
 * @poll_ccb completes (SUCCESS) or ~100 retries elapse (FAILED).
 * Drains and completes every ccb it finds on the way.
 *
 * Fix: "&reg" occurrences had been corrupted to a mojibake "registered
 * sign" character (HTML-entity mangling); restored "&reg->".
 */
static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		/* a zero entry means the done queue slot is empty */
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
uint32_t flag_ccb, ccb_cdb_phy;
struct ARCMSR_CDB *arcmsr_cdb;
bool error;
struct CommandControlBlock *pCCB;
uint32_t poll_ccb_done = 0, poll_count = 0;
int rtn;
polling_hbc_ccb_retry:
poll_count++;
while (1) {
if ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
if (poll_ccb_done) {
rtn = SUCCESS;
break;
} else {
msleep(25);
if (poll_count > 100) {
rtn = FAILED;
break;
}
goto polling_hbc_ccb_retry;
}
}
flag_ccb = readl(®->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
/* check ifcommand done with no error*/
if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
" poll command abort successfully \n"
, acb->host->host_no
, pCCB->pcmd->device->id
, pCCB->pcmd->device->lun
, pCCB);
pCCB->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(pCCB);
continue;
}
printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
" command done ccb = '0x%p'"
"ccboutstandingcount = %d \n"
, acb->host->host_no
, pCCB
, atomic_read(&acb->ccboutstandingcount));
continue;
}
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
arcmsr_report_ccb_state(acb, pCCB, error);
}
return rtn;
}
/*
 * arcmsr_polling_ccbdone - fan out to the adapter-family specific polling
 * routine and return its SUCCESS/FAILED result.
 */
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	int rtn = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
	}
	return rtn;
}
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
/*
********************************************************************
** here we need to tell iop 331 our freeccb.HighPart
** if freeccb.HighPart is not zero
********************************************************************
*/
cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
/*
***********************************************************************
** if adapter type B, set window of "post command Q"
***********************************************************************
*/
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
if (cdb_phyaddr_hi32 != 0) {
struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t intmask_org;
intmask_org = arcmsr_disable_outbound_ints(acb);
writel(ARCMSR_SIGNATURE_SET_CONFIG, \
®->message_rwbuffer[0]);
writel(cdb_phyaddr_hi32, ®->message_rwbuffer[1]);
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
®->inbound_msgaddr0);
if (!arcmsr_hba_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
part physical address timeout\n",
acb->host->host_no);
return 1;
}
arcmsr_enable_outbound_ints(acb, intmask_org);
}
}
break;
case ACB_ADAPTER_TYPE_B: {
unsigned long post_queue_phyaddr;
uint32_t __iomem *rwbuffer;
struct MessageUnit_B *reg = acb->pmuB;
uint32_t intmask_org;
intmask_org = arcmsr_disable_outbound_ints(acb);
reg->postq_index = 0;
reg->doneq_index = 0;
writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
if (!arcmsr_hbb_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
acb->host->host_no);
return 1;
}
post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
rwbuffer = reg->message_rwbuffer;
/* driver "set config" signature */
writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
/* normal should be zero */
writel(cdb_phyaddr_hi32, rwbuffer++);
/* postQ size (256 + 8)*4 */
writel(post_queue_phyaddr, rwbuffer++);
/* doneQ size (256 + 8)*4 */
writel(post_queue_phyaddr + 1056, rwbuffer++);
/* ccb maxQ size must be --> [(256 + 8)*4]*/
writel(1056, rwbuffer);
writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
if (!arcmsr_hbb_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
timeout \n",acb->host->host_no);
return 1;
}
arcmsr_hbb_enable_driver_mode(acb);
arcmsr_enable_outbound_ints(acb, intmask_org);
}
break;
case ACB_ADAPTER_TYPE_C: {
if (cdb_phyaddr_hi32 != 0) {
struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
acb->adapter_index, cdb_phyaddr_hi32);
writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->msgcode_rwbuffer[0]);
writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[1]);
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
if (!arcmsr_hbc_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
timeout \n", acb->host->host_no);
return 1;
}
}
}
}
return 0;
}
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
uint32_t firmware_state = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
do {
firmware_state = readl(®->outbound_msgaddr1);
} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
do {
firmware_state = readl(reg->iop2drv_doorbell);
} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
}
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
do {
firmware_state = readl(®->outbound_msgaddr1);
} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
}
}
}
/*
 * arcmsr_request_hba_device_map - periodic (6s) device-map poll for type A.
 * Skips the poll while a bus reset/abort is in flight or the retry token
 * has run out; otherwise posts GET_CONFIG and re-arms the timer.
 *
 * Fix: "&reg" had been corrupted to a mojibake "registered sign" character
 * (HTML-entity mangling); restored "&reg->".
 */
static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		/* refill the token bucket once the previous one is consumed */
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
			atomic_set(&acb->rq_map_token, 16);
		}
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
	return;
}
/*
 * arcmsr_request_hbb_device_map - periodic (6s) device-map poll for type B.
 * Skips the poll while a bus reset/abort is in flight or the retry token
 * has run out; otherwise posts GET_CONFIG and re-arms the timer.
 */
static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B __iomem *reg = acb->pmuB;

	if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
		((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
		((acb->acb_flags & ACB_F_ABORT) != 0)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	}
	acb->fw_flag = FW_NORMAL;
	/* refill the token bucket once the previous one is consumed */
	if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token))
		atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
	if (atomic_dec_and_test(&acb->rq_map_token)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
/*
 * arcmsr_request_hbc_device_map - periodic (6s) device-map poll for type C.
 * Skips the poll while a bus reset/abort is in flight or the retry token
 * has run out; otherwise posts GET_CONFIG (plus doorbell) and re-arms.
 *
 * Fix: "&reg" had been corrupted to a mojibake "registered sign" character
 * (HTML-entity mangling); restored "&reg->".
 */
static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		/* refill the token bucket once the previous one is consumed */
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
			atomic_set(&acb->rq_map_token, 16);
		}
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
	return;
}
/*
 * arcmsr_request_device_map - timer callback; dispatch the device-map poll
 * to the adapter-family specific routine.
 */
static void arcmsr_request_device_map(unsigned long pacb)
{
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_request_hba_device_map(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_request_hbb_device_map(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_request_hbc_device_map(acb);
	}
}
/*
 * arcmsr_start_hba_bgrb - ask a type A IOP to start background rebuild.
 *
 * Fix: "&reg" had been corrupted to a mojibake "registered sign" character
 * (HTML-entity mangling); restored "&reg->".
 */
static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
		rebulid' timeout \n", acb->host->host_no);
	}
}
/*
 * arcmsr_start_hbb_bgrb - ask a type B IOP to start background rebuild.
 */
static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb))
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
		rebulid' timeout \n",acb->host->host_no);
}
/*
 * arcmsr_start_hbc_bgrb - ask a type C IOP to start background rebuild.
 */
static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB))
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
		rebulid' timeout \n", pACB->host->host_no);
}
/*
 * arcmsr_start_adapter_bgrb - dispatch "start background rebuild" to the
 * adapter-family specific routine.
 */
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
		arcmsr_start_hba_bgrb(acb);
	else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
		arcmsr_start_hbb_bgrb(acb);
	else if (acb->adapter_type == ACB_ADAPTER_TYPE_C)
		arcmsr_start_hbc_bgrb(acb);
}
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t outbound_doorbell;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
/*clear doorbell interrupt */
writel(outbound_doorbell, ®->outbound_doorbell);
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
}
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
uint32_t outbound_doorbell;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
writel(outbound_doorbell, ®->outbound_doorbell_clear);
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
}
}
}
/*
 * arcmsr_enable_eoi_mode - activate explicit end-of-interrupt mode.
 * Only type B adapters use this message; A and C are no-ops.
 */
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg;

	if (acb->adapter_type != ACB_ADAPTER_TYPE_B)
		return;
	reg = acb->pmuB;
	writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb))
		printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
}
/*
 * arcmsr_hardware_reset - hard-reset the adapter, preserving PCI config.
 * Saves the first 64 bytes of PCI config space, triggers the chip-specific
 * reset (1680: magic register write; 1880: diagnostic unlock sequence then
 * RESET_ADAPTER; others: config-space poke), then restores config space.
 *
 * Fix: the 1880 unlock loop tested
 *     (readl(host_diagnostic) | DiagWrite_ENABLE) == 0
 * which is constant-false (OR with a nonzero mask), so the sequence was
 * never retried.  The intent - matching upstream - is to repeat the write
 * sequence until the DiagWrite enable bit reads back set, so test with '&'.
 */
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	u32 temp = 0;
	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if ((acb->dev_id == 0x1680)) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if ((acb->dev_id == 0x1880)) {
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while ((((temp = readl(&pmuC->host_diagnostic)) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else {
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}
/*
 * arcmsr_iop_init - bring the IOP online after probe or bus reset.
 *
 * The order is protocol-critical: mask outbound interrupts, wait for the
 * firmware-ready flag, push the ccb pool address (iop_confirm), start the
 * background rebuild, drain stale doorbell state, enable EOI mode (type B
 * only), then unmask interrupts and mark the adapter initialized.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
/*
 * arcmsr_iop_reset - abort and reclaim all outstanding ccbs.
 *
 * If commands are outstanding: mask interrupts, tell the IOP to abort
 * everything, flush the outbound post queue, then walk the ccb pool
 * returning every in-flight ccb (unmapping its DMA and putting it back on
 * the free list under ccblist_lock) before re-enabling interrupts.
 *
 * Returns the result of arcmsr_abort_allcmd() when work was done, 0x00
 * when nothing was outstanding.
 */
static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i = 0;
	unsigned long flags;
	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				/* release the command's DMA mapping before
				 * recycling the ccb onto the free list */
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				ccb->ccb_flags = 0;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
		return rtnval;
	}
	return rtnval;
}
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterControlBlock *acb;
uint32_t intmask_org, outbound_doorbell;
int retry_count = 0;
int rtn = FAILED;
acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
acb->num_resets++;
switch(acb->adapter_type){
case ACB_ADAPTER_TYPE_A:{
if (acb->acb_flags & ACB_F_BUS_RESET){
long timeout;
printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
if (timeout) {
return SUCCESS;
}
}
acb->acb_flags |= ACB_F_BUS_RESET;
if (!arcmsr_iop_reset(acb)) {
struct MessageUnit_A __iomem *reg;
reg = acb->pmuA;
arcmsr_hardware_reset(acb);
acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep_again:
ssleep(ARCMSR_SLEEPTIME);
if ((readl(®->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
if (retry_count > ARCMSR_RETRYCOUNT) {
acb->fw_flag = FW_DEADLOCK;
printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
return FAILED;
}
retry_count++;
goto sleep_again;
}
acb->acb_flags |= ACB_F_IOP_INITED;
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_get_firmware_spec(acb);
arcmsr_start_adapter_bgrb(acb);
/* clear Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
writel(outbound_doorbell, ®->outbound_doorbell); /*clear interrupt */
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
acb->acb_flags &= ~ACB_F_BUS_RESET;
rtn = SUCCESS;
printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
rtn = SUCCESS;
}
break;
}
case ACB_ADAPTER_TYPE_B:{
acb->acb_flags |= ACB_F_BUS_RESET;
if (!arcmsr_iop_reset(acb)) {
acb->acb_flags &= ~ACB_F_BUS_RESET;
rtn = FAILED;
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
rtn = SUCCESS;
}
break;
}
case ACB_ADAPTER_TYPE_C:{
if (acb->acb_flags & ACB_F_BUS_RESET) {
long timeout;
printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
if (timeout) {
return SUCCESS;
}
}
acb->acb_flags |= ACB_F_BUS_RESET;
if (!arcmsr_iop_reset(acb)) {
struct MessageUnit_C __iomem *reg;
reg = acb->pmuC;
arcmsr_hardware_reset(acb);
acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep:
ssleep(ARCMSR_SLEEPTIME);
if ((readl(®->host_diagnostic) & 0x04) != 0) {
printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
if (retry_count > ARCMSR_RETRYCOUNT) {
acb->fw_flag = FW_DEADLOCK;
printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
return FAILED;
}
retry_count++;
goto sleep;
}
acb->acb_flags |= ACB_F_IOP_INITED;
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_get_firmware_spec(acb);
arcmsr_start_adapter_bgrb(acb);
/* clear Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
writel(outbound_doorbell, ®->outbound_doorbell_clear); /*clear interrupt */
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
acb->acb_flags &= ~ACB_F_BUS_RESET;
rtn = SUCCESS;
printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
rtn = SUCCESS;
}
break;
}
}
return rtn;
}
/*
 * arcmsr_abort_one_cmd - abort a single CCB by polling it to completion.
 * Thin wrapper kept for readability at the call site in arcmsr_abort().
 */
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	return arcmsr_polling_ccbdone(acb, ccb);
}
/*
 * arcmsr_abort - SCSI EH abort handler: find the CCB carrying @cmd and
 * poll it done.  Returns FAILED when nothing is outstanding or the command
 * is not found (it may already have completed).
 */
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int rtn = FAILED;
	int idx;

	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d \n",
		acb->host->host_no, cmd->device->id, cmd->device->lun);
	acb->acb_flags |= ACB_F_ABORT;
	acb->num_aborts++;
	/*
	 ************************************************
	 ** the all interrupt service routine is locked
	 ** we need to handle it as soon as possible and exit
	 ************************************************
	 */
	if (atomic_read(&acb->ccboutstandingcount)) {
		/* Scan the pool for the CCB still running this command. */
		for (idx = 0; idx < ARCMSR_MAX_FREECCB_NUM; idx++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[idx];

			if (ccb->startdone != ARCMSR_CCB_START)
				continue;
			if (ccb->pcmd != cmd)
				continue;
			ccb->startdone = ARCMSR_CCB_ABORTED;
			rtn = arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
		acb->acb_flags &= ~ACB_F_ABORT;
	}
	return rtn;
}
/*
 * arcmsr_info - return the adapter description string for the SCSI midlayer.
 *
 * Fix: bound the format with snprintf() instead of sprintf() so the static
 * buffer can never be overrun if the version string grows.
 * NOTE(review): the static buffer is shared across hosts; the midlayer
 * serializes calls, so this matches the original contract.
 */
static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;
	switch (acb->pdev->device) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1200:
	case PCI_DEVICE_ID_ARECA_1202:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1201:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
	case PCI_DEVICE_ID_ARECA_1880:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}
	snprintf(buf, sizeof(buf),
		"Areca %s Host Adapter RAID Controller%s\n %s",
		type, raid6 ? "( RAID6 capable)" : "",
		ARCMSR_DRIVER_VERSION);
	return buf;
}
| gpl-2.0 |
timmytim/honeybutter_kernel | drivers/hwmon/smsc47b397.c | 1550 | 10119 | /*
smsc47b397.c - Part of lm_sensors, Linux kernel modules
for hardware monitoring
Supports the SMSC LPC47B397-NC Super-I/O chip.
Author/Maintainer: Mark M. Hoffman <mhoffman@lightlink.com>
Copyright (C) 2004 Utilitek Systems, Inc.
derived in part from smsc47m1.c:
Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/io.h>
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
static struct platform_device *pdev;
#define DRVNAME "smsc47b397"
/* Super-I/0 registers and commands */
#define REG 0x2e /* The register to read/write */
#define VAL 0x2f /* The value to read/write */
/* Write @val to Super-I/O config register @reg (index/data port pair). */
static inline void superio_outb(int reg, int val)
{
	outb(reg, REG);
	outb(val, VAL);
}
/* Read and return Super-I/O config register @reg via the index/data ports. */
static inline int superio_inb(int reg)
{
	outb(reg, REG);
	return inb(VAL);
}
/* select superio logical device */
/* Register 0x07 is the logical-device select register on SMSC chips. */
static inline void superio_select(int ld)
{
	superio_outb(0x07, ld);
}
/* Unlock the Super-I/O configuration space (magic key 0x55 to the index port). */
static inline void superio_enter(void)
{
	outb(0x55, REG);
}
/* Re-lock the Super-I/O configuration space (magic key 0xAA). */
static inline void superio_exit(void)
{
	outb(0xAA, REG);
}
#define SUPERIO_REG_DEVID 0x20
#define SUPERIO_REG_DEVREV 0x21
#define SUPERIO_REG_BASE_MSB 0x60
#define SUPERIO_REG_BASE_LSB 0x61
#define SUPERIO_REG_LD8 0x08
#define SMSC_EXTENT 0x02
/* 0 <= nr <= 3 */
static u8 smsc47b397_reg_temp[] = {0x25, 0x26, 0x27, 0x80};
#define SMSC47B397_REG_TEMP(nr) (smsc47b397_reg_temp[(nr)])
/* 0 <= nr <= 3 */
#define SMSC47B397_REG_FAN_LSB(nr) (0x28 + 2 * (nr))
#define SMSC47B397_REG_FAN_MSB(nr) (0x29 + 2 * (nr))
/* Per-device state; protected as noted per field. */
struct smsc47b397_data {
	unsigned short addr;		/* runtime register base I/O address */
	const char *name;		/* chip name reported via sysfs "name" */
	struct device *hwmon_dev;	/* registered hwmon class device */

	struct mutex lock;		/* serializes index/data register access */
	struct mutex update_lock;	/* serializes cache refresh below */
	unsigned long last_updated;	/* in jiffies */
	int valid;			/* nonzero once the cache has been filled */

	/* register values (cached, refreshed at most once per second) */
	u16 fan[4];			/* raw tach counts, LSB|MSB combined */
	u8 temp[4];			/* raw temps, 1 degC/bit two's complement */
};
/* Read one runtime register: write the index, then read the data port. */
static int smsc47b397_read_value(struct smsc47b397_data *data, u8 reg)
{
	int val;

	/* The two-cycle access must not be interleaved between callers. */
	mutex_lock(&data->lock);
	outb(reg, data->addr);
	val = inb_p(data->addr + 1);
	mutex_unlock(&data->lock);

	return val;
}
/*
 * Refresh the cached sensor registers if they are stale (older than 1s)
 * or never read, then return the device data.  Callers read the cache
 * without further locking; staleness is acceptable by design.
 */
static struct smsc47b397_data *smsc47b397_update_device(struct device *dev)
{
	struct smsc47b397_data *data = dev_get_drvdata(dev);
	int i;
	mutex_lock(&data->update_lock);
	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		dev_dbg(dev, "starting device update...\n");
		/* 4 temperature inputs, 4 fan inputs */
		for (i = 0; i < 4; i++) {
			data->temp[i] = smsc47b397_read_value(data,
					SMSC47B397_REG_TEMP(i));
			/* must read LSB first */
			data->fan[i] = smsc47b397_read_value(data,
					SMSC47B397_REG_FAN_LSB(i));
			data->fan[i] |= smsc47b397_read_value(data,
					SMSC47B397_REG_FAN_MSB(i)) << 8;
		}
		data->last_updated = jiffies;
		data->valid = 1;
		dev_dbg(dev, "... device update complete\n");
	}
	mutex_unlock(&data->update_lock);
	return data;
}
/* TEMP: 0.001C/bit (-128C to +127C)
   REG: 1C/bit, two's complement */
static int temp_from_reg(u8 reg)
{
	int degc = (s8)reg;	/* sign-extend the raw register value */

	return degc * 1000;	/* scale degrees C to millidegrees */
}
/* sysfs read for tempN_input: report the cached reading in millidegrees C. */
static ssize_t show_temp(struct device *dev, struct device_attribute
		*devattr, char *buf)
{
	struct smsc47b397_data *data = smsc47b397_update_device(dev);
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(devattr);

	return sprintf(buf, "%d\n", temp_from_reg(data->temp[sattr->index]));
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
/* FAN: 1 RPM/bit
   REG: count of 90kHz pulses / revolution */
static int fan_from_reg(u16 reg)
{
	/* 0x0000 and 0xffff indicate a stalled fan / no valid reading */
	if (reg != 0 && reg != 0xffff)
		return 90000 * 60 / reg;

	return 0;
}
/* sysfs read for fanN_input: report the cached tach reading in RPM. */
static ssize_t show_fan(struct device *dev, struct device_attribute
		*devattr, char *buf)
{
	struct smsc47b397_data *data = smsc47b397_update_device(dev);
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(devattr);

	return sprintf(buf, "%d\n", fan_from_reg(data->fan[sattr->index]));
}
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
static ssize_t show_name(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct smsc47b397_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static struct attribute *smsc47b397_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan4_input.dev_attr.attr,
&dev_attr_name.attr,
NULL
};
static const struct attribute_group smsc47b397_group = {
.attrs = smsc47b397_attributes,
};
/* Tear down in strict reverse order of probe: hwmon device, sysfs group,
 * I/O region, then the private data. */
static int __devexit smsc47b397_remove(struct platform_device *pdev)
{
	struct smsc47b397_data *data = platform_get_drvdata(pdev);
	struct resource *res;
	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &smsc47b397_group);
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	release_region(res->start, SMSC_EXTENT);
	kfree(data);
	return 0;
}
static int smsc47b397_probe(struct platform_device *pdev);
static struct platform_driver smsc47b397_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = smsc47b397_probe,
.remove = __devexit_p(smsc47b397_remove),
};
/*
 * Probe: claim the I/O region, allocate state, publish sysfs attributes
 * and register with the hwmon class.  Uses goto-chain cleanup so each
 * failure point unwinds exactly what was acquired before it.
 */
static int __devinit smsc47b397_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct smsc47b397_data *data;
	struct resource *res;
	int err = 0;
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!request_region(res->start, SMSC_EXTENT,
			smsc47b397_driver.driver.name)) {
		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
			(unsigned long)res->start,
			(unsigned long)res->start + SMSC_EXTENT - 1);
		return -EBUSY;
	}
	if (!(data = kzalloc(sizeof(struct smsc47b397_data), GFP_KERNEL))) {
		err = -ENOMEM;
		goto error_release;
	}
	data->addr = res->start;
	data->name = "smsc47b397";
	mutex_init(&data->lock);
	mutex_init(&data->update_lock);
	platform_set_drvdata(pdev, data);
	/* Attributes must exist before the class device is announced. */
	if ((err = sysfs_create_group(&dev->kobj, &smsc47b397_group)))
		goto error_free;
	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto error_remove;
	}
	return 0;
error_remove:
	sysfs_remove_group(&dev->kobj, &smsc47b397_group);
error_free:
	kfree(data);
error_release:
	release_region(res->start, SMSC_EXTENT);
	return err;
}
/*
 * Create and register the platform device at the detected base @address.
 * Checks for ACPI resource conflicts first.  On success the device is
 * stored in the file-scope 'pdev' for smsc47b397_exit().
 */
static int __init smsc47b397_device_add(unsigned short address)
{
	struct resource res = {
		.start	= address,
		.end	= address + SMSC_EXTENT - 1,
		.name	= DRVNAME,
		.flags	= IORESOURCE_IO,
	};
	int err;
	err = acpi_check_resource_conflict(&res);
	if (err)
		goto exit;
	pdev = platform_device_alloc(DRVNAME, address);
	if (!pdev) {
		err = -ENOMEM;
		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
		goto exit;
	}
	err = platform_device_add_resources(pdev, &res, 1);
	if (err) {
		printk(KERN_ERR DRVNAME ": Device resource addition failed "
		       "(%d)\n", err);
		goto exit_device_put;
	}
	err = platform_device_add(pdev);
	if (err) {
		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
		       err);
		goto exit_device_put;
	}
	return 0;
exit_device_put:
	platform_device_put(pdev);
exit:
	return err;
}
/*
 * Detect a supported SMSC Super-I/O chip and read its runtime register
 * base address into *addr.  Returns 0 on success, -ENODEV if the device
 * ID is not one we support.  force_id (module param) overrides detection.
 */
static int __init smsc47b397_find(unsigned short *addr)
{
	u8 id, rev;
	char *name;
	superio_enter();
	id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
	switch(id) {
	case 0x81:
		name = "SCH5307-NS";
		break;
	case 0x6f:
		name = "LPC47B397-NC";
		break;
	case 0x85:
	case 0x8c:
		name = "SCH5317";
		break;
	default:
		superio_exit();
		return -ENODEV;
	}
	rev = superio_inb(SUPERIO_REG_DEVREV);
	/* The hardware-monitor block lives on logical device 8. */
	superio_select(SUPERIO_REG_LD8);
	*addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
		 | superio_inb(SUPERIO_REG_BASE_LSB);
	printk(KERN_INFO DRVNAME ": found SMSC %s "
	       "(base address 0x%04x, revision %u)\n",
	       name, *addr, rev);
	superio_exit();
	return 0;
}
/* Module init: detect the chip, register the driver, then instantiate
 * the platform device (which triggers probe). */
static int __init smsc47b397_init(void)
{
	unsigned short address;
	int ret;
	if ((ret = smsc47b397_find(&address)))
		return ret;
	ret = platform_driver_register(&smsc47b397_driver);
	if (ret)
		goto exit;
	/* Sets global pdev as a side effect */
	ret = smsc47b397_device_add(address);
	if (ret)
		goto exit_driver;
	return 0;
exit_driver:
	platform_driver_unregister(&smsc47b397_driver);
exit:
	return ret;
}
/* Module exit: unregister in reverse order of smsc47b397_init(). */
static void __exit smsc47b397_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&smsc47b397_driver);
}
MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
MODULE_DESCRIPTION("SMSC LPC47B397 driver");
MODULE_LICENSE("GPL");
module_init(smsc47b397_init);
module_exit(smsc47b397_exit);
| gpl-2.0 |
LEPT-Development/android_kernel_lge_msm8916 | drivers/w1/masters/w1-gpio.c | 2062 | 5098 | /*
* w1-gpio - GPIO w1 bus master driver
*
* Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/w1-gpio.h>
#include <linux/gpio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/err.h>
#include <linux/of.h>
#include "../w1.h"
#include "../w1_int.h"
/*
 * Drive the bus by flipping the GPIO direction (open-collector emulation):
 * output-low pulls the line down, input lets the external pull-up raise it.
 */
static void w1_gpio_write_bit_dir(void *data, u8 bit)
{
	struct w1_gpio_platform_data *pdata = data;

	if (!bit)
		gpio_direction_output(pdata->pin, 0);
	else
		gpio_direction_input(pdata->pin);
}
/* Drive the bus by setting the GPIO level (true open-drain pins). */
static void w1_gpio_write_bit_val(void *data, u8 bit)
{
	struct w1_gpio_platform_data *pdata = data;
	unsigned int pin = pdata->pin;

	gpio_set_value(pin, bit);
}
/* Sample the bus, normalizing the raw GPIO level to a strict 0/1 bit. */
static u8 w1_gpio_read_bit(void *data)
{
	struct w1_gpio_platform_data *pdata = data;

	return gpio_get_value(pdata->pin) != 0;
}
#if defined(CONFIG_OF)
static struct of_device_id w1_gpio_dt_ids[] = {
{ .compatible = "w1-gpio" },
{}
};
MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
#endif
/*
 * Parse device-tree properties into a freshly allocated platform_data.
 *
 * Fixes: the local 'pdata' was initialized from pdev->dev.platform_data and
 * immediately overwritten (dead store, removed); of_get_gpio() for the bus
 * pin was not checked, so a bad/deferred GPIO (including -EPROBE_DEFER)
 * silently became a bogus pin number — now propagated to the caller.
 * The optional pull-up pin stays unchecked: gpio_is_valid() in the probe
 * path already treats a negative value as "not present".
 */
static int w1_gpio_probe_dt(struct platform_device *pdev)
{
	struct w1_gpio_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	int gpio;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "linux,open-drain", NULL))
		pdata->is_open_drain = 1;

	gpio = of_get_gpio(np, 0);
	if (gpio < 0)
		return gpio;
	pdata->pin = gpio;

	pdata->ext_pullup_enable_pin = of_get_gpio(np, 1);
	pdev->dev.platform_data = pdata;

	return 0;
}
/*
 * Probe: resolve platform data (from DT if populated), claim the GPIOs,
 * wire up the bit operations and register the w1 bus master.  Error paths
 * unwind via the goto chain in reverse acquisition order.
 */
static int w1_gpio_probe(struct platform_device *pdev)
{
	struct w1_bus_master *master;
	struct w1_gpio_platform_data *pdata;
	struct pinctrl *pinctrl;
	int err;
	/* Pinmux selection is best-effort; a failure is only warned about. */
	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl))
		dev_warn(&pdev->dev, "unable to select pin group\n");
	if (of_have_populated_dt()) {
		err = w1_gpio_probe_dt(pdev);
		if (err < 0) {
			dev_err(&pdev->dev, "Failed to parse DT\n");
			return err;
		}
	}
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No configuration data\n");
		return -ENXIO;
	}
	master = kzalloc(sizeof(struct w1_bus_master), GFP_KERNEL);
	if (!master) {
		dev_err(&pdev->dev, "Out of memory\n");
		return -ENOMEM;
	}
	err = gpio_request(pdata->pin, "w1");
	if (err) {
		dev_err(&pdev->dev, "gpio_request (pin) failed\n");
		goto free_master;
	}
	/* The external strong pull-up pin is optional. */
	if (gpio_is_valid(pdata->ext_pullup_enable_pin)) {
		err = gpio_request_one(pdata->ext_pullup_enable_pin,
				       GPIOF_INIT_LOW, "w1 pullup");
		if (err < 0) {
			dev_err(&pdev->dev, "gpio_request_one "
					"(ext_pullup_enable_pin) failed\n");
			goto free_gpio;
		}
	}
	master->data = pdata;
	master->read_bit = w1_gpio_read_bit;
	/* Open-drain pins can stay outputs; otherwise emulate via direction. */
	if (pdata->is_open_drain) {
		gpio_direction_output(pdata->pin, 1);
		master->write_bit = w1_gpio_write_bit_val;
	} else {
		gpio_direction_input(pdata->pin);
		master->write_bit = w1_gpio_write_bit_dir;
	}
	err = w1_add_master_device(master);
	if (err) {
		dev_err(&pdev->dev, "w1_add_master device failed\n");
		goto free_gpio_ext_pu;
	}
	if (pdata->enable_external_pullup)
		pdata->enable_external_pullup(1);
	if (gpio_is_valid(pdata->ext_pullup_enable_pin))
		gpio_set_value(pdata->ext_pullup_enable_pin, 1);
	platform_set_drvdata(pdev, master);
	return 0;
 free_gpio_ext_pu:
	if (gpio_is_valid(pdata->ext_pullup_enable_pin))
		gpio_free(pdata->ext_pullup_enable_pin);
 free_gpio:
	gpio_free(pdata->pin);
 free_master:
	kfree(master);
	return err;
}
/* Remove: drop the pull-up, unregister the master, release the GPIO. */
static int w1_gpio_remove(struct platform_device *pdev)
{
	struct w1_bus_master *master = platform_get_drvdata(pdev);
	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
	if (pdata->enable_external_pullup)
		pdata->enable_external_pullup(0);
	if (gpio_is_valid(pdata->ext_pullup_enable_pin))
		gpio_set_value(pdata->ext_pullup_enable_pin, 0);
	w1_remove_master_device(master);
	gpio_free(pdata->pin);
	kfree(master);
	return 0;
}
#ifdef CONFIG_PM
/* Suspend hook: release the external strong pull-up, if the board has one. */
static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;

	if (pdata->enable_external_pullup != NULL)
		pdata->enable_external_pullup(0);

	return 0;
}
static int w1_gpio_resume(struct platform_device *pdev)
{
struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
return 0;
}
#else
#define w1_gpio_suspend NULL
#define w1_gpio_resume NULL
#endif
static struct platform_driver w1_gpio_driver = {
.driver = {
.name = "w1-gpio",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(w1_gpio_dt_ids),
},
.probe = w1_gpio_probe,
.remove = w1_gpio_remove,
.suspend = w1_gpio_suspend,
.resume = w1_gpio_resume,
};
module_platform_driver(w1_gpio_driver);
MODULE_DESCRIPTION("GPIO w1 bus master driver");
MODULE_AUTHOR("Ville Syrjala <syrjala@sci.fi>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
wanam/Adam-Kernel-GalaxyS6-G920F | arch/mips/kernel/smp-cmp.c | 2062 | 5301 | /*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Chris Dearman (chris@mips.com)
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/amon.h>
#include <asm/gic.h>
/* Raise the call-function IPI for @cpu through the GIC. */
static void ipi_call_function(unsigned int cpu)
{
	unsigned int irq = plat_ipi_call_int_xlate(cpu);

	pr_debug("CPU%d: %s cpu %d status %08x\n",
		 smp_processor_id(), __func__, cpu, read_c0_status());

	gic_send_ipi(irq);
}
/* Raise the reschedule IPI for @cpu through the GIC. */
static void ipi_resched(unsigned int cpu)
{
	unsigned int irq = plat_ipi_resched_int_xlate(cpu);

	pr_debug("CPU%d: %s cpu %d status %08x\n",
		 smp_processor_id(), __func__, cpu, read_c0_status());

	gic_send_ipi(irq);
}
/*
* FIXME: This isn't restricted to CMP
* The SMVP kernel could use GIC interrupts if available
*/
/*
 * Send a single IPI with interrupts disabled locally.  Unknown actions
 * are silently ignored, matching the original switch with no default.
 */
void cmp_send_ipi_single(int cpu, unsigned int action)
{
	unsigned long flags;

	local_irq_save(flags);

	if (action == SMP_CALL_FUNCTION)
		ipi_call_function(cpu);
	else if (action == SMP_RESCHEDULE_YOURSELF)
		ipi_resched(cpu);

	local_irq_restore(flags);
}
/* Fan an IPI out to every CPU set in @mask, one at a time. */
static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		cmp_send_ipi_single(cpu, action);
}
static void cmp_init_secondary(void)
{
struct cpuinfo_mips *c = ¤t_cpu_data;
/* Assume GIC is present */
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
STATUSF_IP7);
/* Enable per-cpu interrupts: platform specific */
c->core = (read_c0_ebase() >> 1) & 0x1ff;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
#endif
}
/* Final per-CPU bring-up: arm the count/compare timer, join the FPU
 * affinity mask if applicable, and enable local interrupts. */
static void cmp_smp_finish(void)
{
	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);

	/* CDFIXME: remove this? */
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
/* All CPUs are up; nothing to do on CMP beyond a debug trace. */
static void cmp_cpus_done(void)
{
	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it running
* smp_bootstrap is the place to resume from
* __KSTK_TOS(idle) is apparently the stack pointer
* (unsigned long)idle->thread_info the gp
*/
/*
 * Setup the PC, SP, and GP of a secondary processor and start it running
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 */
static void cmp_boot_secondary(int cpu, struct task_struct *idle)
{
	struct thread_info *gp = task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);
	unsigned long pc = (unsigned long)&smp_bootstrap;
	unsigned long a0 = 0;	/* argument passed to smp_bootstrap */

	pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
		__func__, cpu);

#if 0
	/* Needed? */
	flush_icache_range((unsigned long)gp,
			   (unsigned long)(gp + sizeof(struct thread_info)));
#endif

	/* Hand control of the target core to the firmware monitor. */
	amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
}
/*
* Common setup before any secondaries are started
*/
/*
 * Common setup before any secondaries are started
 *
 * Probes the firmware monitor for available CPUs, builds the logical<->
 * physical CPU maps, and records sibling count when MT is present.
 */
void __init cmp_smp_setup(void)
{
	int i;
	int ncpu = 0;

	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* CPU 0 is the boot CPU; enumerate the rest via the monitor. */
	for (i = 1; i < NR_CPUS; i++) {
		if (amon_cpu_avail(i)) {
			set_cpu_possible(i, true);
			__cpu_number_map[i]	= ++ncpu;
			__cpu_logical_map[ncpu] = i;
		}
	}

	if (cpu_has_mipsmt) {
		unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();

		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
		smp_num_siblings = nvpe;
	}
	pr_info("Detected %i available secondary CPU(s)\n", ncpu);
}
/* Pre-SMP hook: apply MT CPU options before secondaries come up. */
void __init cmp_prepare_cpus(unsigned int max_cpus)
{
	pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
		 smp_processor_id(), __func__, max_cpus);

	/*
	 * FIXME: some of these options are per-system, some per-core and
	 * some per-cpu
	 */
	mips_mt_set_cpuoptions();
}
struct plat_smp_ops cmp_smp_ops = {
.send_ipi_single = cmp_send_ipi_single,
.send_ipi_mask = cmp_send_ipi_mask,
.init_secondary = cmp_init_secondary,
.smp_finish = cmp_smp_finish,
.cpus_done = cmp_cpus_done,
.boot_secondary = cmp_boot_secondary,
.smp_setup = cmp_smp_setup,
.prepare_cpus = cmp_prepare_cpus,
};
| gpl-2.0 |
SamsungGalaxyS6/kernel_samsung_exynos7420 | drivers/usb/misc/yurex.c | 2318 | 13510 | /*
* Driver for Meywa-Denki & KAYAC YUREX
*
* Copyright (C) 2010 Tomoki Sekiyama (tomoki.sekiyama@gmail.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/hid.h>
#define DRIVER_AUTHOR "Tomoki Sekiyama"
#define DRIVER_DESC "Driver for Meywa-Denki & KAYAC YUREX"
#define YUREX_VENDOR_ID 0x0c45
#define YUREX_PRODUCT_ID 0x1010
#define CMD_ACK '!'
#define CMD_ANIMATE 'A'
#define CMD_COUNT 'C'
#define CMD_LED 'L'
#define CMD_READ 'R'
#define CMD_SET 'S'
#define CMD_VERSION 'V'
#define CMD_EOF 0x0d
#define CMD_PADDING 0xff
#define YUREX_BUF_SIZE 8
#define YUREX_WRITE_TIMEOUT (HZ*2)
/* table of devices that work with this driver */
static struct usb_device_id yurex_table[] = {
{ USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, yurex_table);
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define YUREX_MINOR_BASE 0
#else
#define YUREX_MINOR_BASE 192
#endif
/* Structure to hold all of our device specific stuff */
struct usb_yurex {
struct usb_device *udev;
struct usb_interface *interface;
__u8 int_in_endpointAddr;
struct urb *urb; /* URB for interrupt in */
unsigned char *int_buffer; /* buffer for intterupt in */
struct urb *cntl_urb; /* URB for control msg */
struct usb_ctrlrequest *cntl_req; /* req for control msg */
unsigned char *cntl_buffer; /* buffer for control msg */
struct kref kref;
struct mutex io_mutex;
struct fasync_struct *async_queue;
wait_queue_head_t waitq;
spinlock_t lock;
__s64 bbu; /* BBU from device */
};
#define to_yurex_dev(d) container_of(d, struct usb_yurex, kref)
static struct usb_driver yurex_driver;
static const struct file_operations yurex_fops;
/*
 * Completion callback for the control URB.  On failure, wake any sender
 * blocked in the write path; on success the sender is woken later by the
 * CMD_ACK interrupt-in message (or its timeout).
 */
static void yurex_control_callback(struct urb *urb)
{
	struct usb_yurex *dev = urb->context;
	int status = urb->status;

	if (status == 0) {
		/* on success, sender woken up by CMD_ACK int in, or timeout */
		return;
	}

	dev_err(&urb->dev->dev, "%s - control failed: %d\n",
		__func__, status);
	wake_up_interruptible(&dev->waitq);
}
/*
 * kref release callback: kill any in-flight URBs, free their coherent
 * buffers, drop the usb_device reference and free the state struct.
 * Runs when the last reference (open file or interface) goes away.
 */
static void yurex_delete(struct kref *kref)
{
	struct usb_yurex *dev = to_yurex_dev(kref);

	dev_dbg(&dev->interface->dev, "%s\n", __func__);

	usb_put_dev(dev->udev);
	if (dev->cntl_urb) {
		usb_kill_urb(dev->cntl_urb);
		kfree(dev->cntl_req);
		if (dev->cntl_buffer)
			usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
				dev->cntl_buffer, dev->cntl_urb->transfer_dma);
		usb_free_urb(dev->cntl_urb);
	}
	if (dev->urb) {
		usb_kill_urb(dev->urb);
		if (dev->int_buffer)
			usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
				dev->int_buffer, dev->urb->transfer_dma);
		usb_free_urb(dev->urb);
	}
	kfree(dev);
}
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver yurex_class = {
.name = "yurex%d",
.fops = &yurex_fops,
.minor_base = YUREX_MINOR_BASE,
};
/*
 * Interrupt-in URB completion: parse CMD_COUNT/CMD_READ messages into the
 * 40-bit BBU counter (notifying pollers via SIGIO) and wake writers on
 * CMD_ACK, then resubmit the URB unless the device is gone.
 */
static void yurex_interrupt(struct urb *urb)
{
	struct usb_yurex *dev = urb->context;
	unsigned char *buf = dev->int_buffer;
	int status = urb->status;
	unsigned long flags;
	int retval, i;

	switch (status) {
	case 0: /*success*/
		break;
	case -EOVERFLOW:
		dev_err(&dev->interface->dev,
			"%s - overflow with length %d, actual length is %d\n",
			__func__, YUREX_BUF_SIZE, dev->urb->actual_length);
		/* fallthrough - treat overflow as fatal, like disconnect */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
		/* The device is terminated, clean up */
		return;
	default:
		dev_err(&dev->interface->dev,
			"%s - unknown status received: %d\n", __func__, status);
		goto exit;
	}

	/* handle received message */
	switch (buf[0]) {
	case CMD_COUNT:
	case CMD_READ:
		if (buf[6] == CMD_EOF) {
			spin_lock_irqsave(&dev->lock, flags);
			dev->bbu = 0;
			/* bytes 1..5 are the counter, big-endian */
			for (i = 1; i < 6; i++) {
				dev->bbu += buf[i];
				if (i != 5)
					dev->bbu <<= 8;
			}
			dev_dbg(&dev->interface->dev, "%s count: %lld\n",
				__func__, dev->bbu);
			spin_unlock_irqrestore(&dev->lock, flags);

			kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
		}
		else
			dev_dbg(&dev->interface->dev,
				"data format error - no EOF\n");
		break;
	case CMD_ACK:
		dev_dbg(&dev->interface->dev, "%s ack: %c\n",
			__func__, buf[1]);
		/* a writer is blocked waiting for this acknowledgement */
		wake_up_interruptible(&dev->waitq);
		break;
	}

exit:
	retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
	if (retval) {
		dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
			__func__, retval);
	}
}
/*
 * Probe callback: allocate the per-device state, find the interrupt-IN
 * endpoint, set up the control and interrupt URBs, start the interrupt
 * URB and register a character device minor.
 *
 * On any failure the single error path drops the initial kref, which
 * frees everything allocated so far via yurex_delete().
 */
static int yurex_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_yurex *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	int retval = -ENOMEM;
	int i;
	DEFINE_WAIT(wait);

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&interface->dev, "Out of memory\n");
		goto error;
	}
	kref_init(&dev->kref);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->waitq);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information: we only need the first
	 * interrupt-IN endpoint */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		endpoint = &iface_desc->endpoint[i].desc;
		if (usb_endpoint_is_int_in(endpoint)) {
			dev->int_in_endpointAddr = endpoint->bEndpointAddress;
			break;
		}
	}
	if (!dev->int_in_endpointAddr) {
		retval = -ENODEV;
		dev_err(&interface->dev, "Could not find endpoints\n");
		goto error;
	}

	/* allocate control URB */
	dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->cntl_urb) {
		dev_err(&interface->dev, "Could not allocate control URB\n");
		goto error;
	}

	/* allocate buffer for control req */
	dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
	if (!dev->cntl_req) {
		dev_err(&interface->dev, "Could not allocate cntl_req\n");
		goto error;
	}

	/* allocate DMA-coherent buffer for the control msg payload */
	dev->cntl_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
					      GFP_KERNEL,
					      &dev->cntl_urb->transfer_dma);
	if (!dev->cntl_buffer) {
		dev_err(&interface->dev, "Could not allocate cntl_buffer\n");
		goto error;
	}

	/* configure control URB as a HID SET_REPORT (output report 1) */
	dev->cntl_req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS |
				      USB_RECIP_INTERFACE;
	dev->cntl_req->bRequest	= HID_REQ_SET_REPORT;
	dev->cntl_req->wValue	= cpu_to_le16((HID_OUTPUT_REPORT + 1) << 8);
	dev->cntl_req->wIndex	= cpu_to_le16(iface_desc->desc.bInterfaceNumber);
	dev->cntl_req->wLength	= cpu_to_le16(YUREX_BUF_SIZE);
	usb_fill_control_urb(dev->cntl_urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)dev->cntl_req, dev->cntl_buffer,
			     YUREX_BUF_SIZE, yurex_control_callback, dev);
	dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	/* allocate interrupt URB */
	dev->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb) {
		dev_err(&interface->dev, "Could not allocate URB\n");
		goto error;
	}

	/* allocate DMA-coherent buffer for interrupt in */
	dev->int_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
					     GFP_KERNEL, &dev->urb->transfer_dma);
	if (!dev->int_buffer) {
		dev_err(&interface->dev, "Could not allocate int_buffer\n");
		goto error;
	}

	/* configure interrupt URB (1 ms polling interval) */
	usb_fill_int_urb(dev->urb, dev->udev,
			 usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
			 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
			 dev, 1);
	dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
		retval = -EIO;
		dev_err(&interface->dev, "Could not submitting URB\n");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &yurex_class);
	if (retval) {
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* -1 means "no count received from the device yet" */
	dev->bbu = -1;

	dev_info(&interface->dev,
		 "USB YUREX device now attached to Yurex #%d\n",
		 interface->minor);

	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, yurex_delete);
	return retval;
}
/*
 * Disconnect callback: unpublish the minor, mark the device as gone for
 * open file handles (dev->interface = NULL under io_mutex), wake any
 * sleepers, and drop the probe-time reference.  The memory itself is
 * freed by yurex_delete() once the last open file releases its kref.
 *
 * NOTE(review): the interrupt URB may still be in flight here until
 * yurex_delete() kills it; its handler dereferences dev->interface
 * after we set it to NULL below — looks racy, confirm before relying
 * on this teardown order.
 */
static void yurex_disconnect(struct usb_interface *interface)
{
	struct usb_yurex *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &yurex_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	/* wakeup waiters */
	kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	wake_up_interruptible(&dev->waitq);

	/* decrement our usage count */
	kref_put(&dev->kref, yurex_delete);

	dev_info(&interface->dev, "USB YUREX #%d now disconnected\n", minor);
}
/* USB driver glue: matches the device IDs in yurex_table. */
static struct usb_driver yurex_driver = {
	.name =		"yurex",
	.probe =	yurex_probe,
	.disconnect =	yurex_disconnect,
	.id_table =	yurex_table,
};
/* fasync handler: (un)subscribe this file from SIGIO notifications. */
static int yurex_fasync(int fd, struct file *file, int on)
{
	struct usb_yurex *dev = file->private_data;

	return fasync_helper(fd, file, on, &dev->async_queue);
}
/*
 * open handler: look up the interface for this minor, take a reference
 * on the device state and stash it in file->private_data.
 */
static int yurex_open(struct inode *inode, struct file *file)
{
	struct usb_interface *interface;
	struct usb_yurex *dev;
	int subminor = iminor(inode);

	interface = usb_find_interface(&yurex_driver, subminor);
	if (!interface) {
		printk(KERN_ERR "%s - error, can't find device for minor %d",
		       __func__, subminor);
		return -ENODEV;
	}

	dev = usb_get_intfdata(interface);
	if (!dev)
		return -ENODEV;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	mutex_lock(&dev->io_mutex);
	file->private_data = dev;
	mutex_unlock(&dev->io_mutex);

	return 0;
}
/* release handler: drop the reference taken in yurex_open(). */
static int yurex_release(struct inode *inode, struct file *file)
{
	struct usb_yurex *dev = file->private_data;

	if (!dev)
		return -ENODEV;

	kref_put(&dev->kref, yurex_delete);
	return 0;
}
/*
 * read handler: return the current counter value as a decimal string.
 *
 * Fixes two defects in the old code:
 *  - snprintf() returns the *would-be* length, which for a 64-bit value
 *    could exceed the on-stack buffer and lead to an out-of-bounds read
 *    in copy_to_user(); scnprintf() returns the bytes actually stored.
 *  - the caller's @count was ignored, so more bytes than requested could
 *    be copied to the user buffer.
 */
static ssize_t yurex_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
{
	struct usb_yurex *dev;
	int retval = 0;
	int bytes_read = 0;
	/* large enough for "-9223372036854775808\n" plus NUL */
	char in_buffer[32];
	unsigned long flags;

	dev = (struct usb_yurex *)file->private_data;

	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* already disconnected */
		retval = -ENODEV;
		goto exit;
	}

	/* snapshot the counter under the lock shared with the URB handler */
	spin_lock_irqsave(&dev->lock, flags);
	bytes_read = scnprintf(in_buffer, sizeof(in_buffer), "%lld\n", dev->bbu);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (*ppos < bytes_read) {
		/* copy at most what the caller asked for and never past
		 * the end of the formatted string */
		size_t len = bytes_read - *ppos;

		if (len > count)
			len = count;
		if (copy_to_user(buffer, in_buffer + *ppos, len))
			retval = -EFAULT;
		else {
			retval = len;
			*ppos += len;
		}
	}

exit:
	mutex_unlock(&dev->io_mutex);
	return retval;
}
/*
 * write handler: parse a short command string from user space, build
 * the corresponding device command in the coherent control buffer and
 * submit it, then sleep until the device ACKs or the timeout expires.
 *
 * Fix: the user data is not NUL-terminated, but simple_strtoull()
 * requires a C string; reserve one byte in the on-stack buffer and
 * terminate it explicitly to avoid reading past the copied data.
 */
static ssize_t yurex_write(struct file *file, const char *user_buffer, size_t count, loff_t *ppos)
{
	struct usb_yurex *dev;
	int i, set = 0, retval = 0;
	char buffer[16 + 1];
	char *data = buffer;
	unsigned long long c, c2 = 0;
	signed long timeout = 0;
	DEFINE_WAIT(wait);

	/* leave room for the terminating NUL added below */
	count = min(sizeof(buffer) - 1, count);
	dev = (struct usb_yurex *)file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto error;

	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* already disconnected */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}
	if (copy_from_user(buffer, user_buffer, count)) {
		mutex_unlock(&dev->io_mutex);
		retval = -EFAULT;
		goto error;
	}
	/* ensure the command string is NUL-terminated for the parser */
	buffer[count] = '\0';
	memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);

	switch (buffer[0]) {
	case CMD_ANIMATE:
	case CMD_LED:
		dev->cntl_buffer[0] = buffer[0];
		dev->cntl_buffer[1] = buffer[1];
		dev->cntl_buffer[2] = CMD_EOF;
		break;
	case CMD_READ:
	case CMD_VERSION:
		dev->cntl_buffer[0] = buffer[0];
		dev->cntl_buffer[1] = 0x00;
		dev->cntl_buffer[2] = CMD_EOF;
		break;
	case CMD_SET:
		data++;
		/* FALL THROUGH */
	case '0' ... '9':
		set = 1;
		c = c2 = simple_strtoull(data, NULL, 0);
		dev->cntl_buffer[0] = CMD_SET;
		/* pack the value big-endian into bytes 1..5 */
		for (i = 1; i < 6; i++) {
			dev->cntl_buffer[i] = (c>>32) & 0xff;
			c <<= 8;
		}
		/* NOTE(review): this writes CMD_EOF into the local copy,
		 * not dev->cntl_buffer[6] — the device sees CMD_PADDING
		 * there.  Preserved as historical behavior; confirm
		 * against the device protocol before changing. */
		buffer[6] = CMD_EOF;
		break;
	default:
		mutex_unlock(&dev->io_mutex);
		return -EINVAL;
	}

	/* send the data as the control msg */
	prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
	dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
		dev->cntl_buffer[0]);
	retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
	if (retval >= 0)
		timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
	finish_wait(&dev->waitq, &wait);
	mutex_unlock(&dev->io_mutex);

	if (retval < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed to send bulk msg, error %d\n",
			__func__, retval);
		goto error;
	}
	if (set && timeout)
		dev->bbu = c2;
	return timeout ? count : -EIO;

error:
	return retval;
}
/* file_operations for the yurex character device minor. */
static const struct file_operations yurex_fops = {
	.owner =	THIS_MODULE,
	.read =		yurex_read,
	.write =	yurex_write,
	.open =		yurex_open,
	.release =	yurex_release,
	.fasync	=	yurex_fasync,
	.llseek =	default_llseek,
};
/* Register/unregister the driver with the USB core at module load/unload. */
module_usb_driver(yurex_driver);

MODULE_LICENSE("GPL");
| gpl-2.0 |
Android-Dongyf/itop-kernel | drivers/net/wireless/b43/leds.c | 2318 | 9242 | /*
Broadcom B43 wireless driver
LED control
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43.h"
#include "leds.h"
#include "rfkill.h"
/* Drive the GPIO line for one LED so that the LED lights up.
 * An active-low LED is lit by clearing its bit. */
static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
			    bool activelow)
{
	u16 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
	u16 mask = 1 << led_index;

	if (activelow)
		ctl &= ~mask;
	else
		ctl |= mask;
	b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
}
/* Drive the GPIO line for one LED so that the LED goes dark.
 * An active-low LED is turned off by setting its bit. */
static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index,
			     bool activelow)
{
	u16 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
	u16 mask = 1 << led_index;

	if (activelow)
		ctl |= mask;
	else
		ctl &= ~mask;
	b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
}
/* Reconcile one LED's hardware state with its cached software state. */
static void b43_led_update(struct b43_wldev *dev,
			   struct b43_led *led)
{
	bool radio_enabled;
	bool want_on = 0;

	if (!led->wl)		/* LED not registered */
		return;

	radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable);

	/* The led->state read is racy, but we don't care. In case we raced
	 * with the brightness_set handler, we will be called again soon
	 * to fixup our state. */
	if (radio_enabled)
		want_on = atomic_read(&led->state) != LED_OFF;

	if (want_on == led->hw_state)
		return;		/* hardware already matches */
	led->hw_state = want_on;

	if (want_on)
		b43_led_turn_on(dev, led->index, led->activelow);
	else
		b43_led_turn_off(dev, led->index, led->activelow);
}
/*
 * Deferred-work handler that pushes pending LED state changes to the
 * hardware.  Runs in process context so it can take wl->mutex.
 */
static void b43_leds_work(struct work_struct *work)
{
	struct b43_leds *leds = container_of(work, struct b43_leds, work);
	struct b43_wl *wl = container_of(leds, struct b43_wl, leds);
	struct b43_wldev *dev;

	mutex_lock(&wl->mutex);
	dev = wl->current_dev;
	/* nothing to do if the device is gone or not started yet */
	if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED))
		goto out_unlock;

	b43_led_update(dev, &wl->leds.led_tx);
	b43_led_update(dev, &wl->leds.led_rx);
	b43_led_update(dev, &wl->leds.led_radio);
	b43_led_update(dev, &wl->leds.led_assoc);

out_unlock:
	mutex_unlock(&wl->mutex);
}
/* Callback from the LED subsystem.  May run in atomic context, so the
 * requested brightness is only recorded here and the actual hardware
 * update is deferred to the workqueue. */
static void b43_led_brightness_set(struct led_classdev *led_dev,
				   enum led_brightness brightness)
{
	struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
	struct b43_wl *wl = led->wl;

	if (unlikely(wl->leds.stop))
		return;
	atomic_set(&led->state, brightness);
	ieee80211_queue_work(wl->hw, &wl->leds.work);
}
/*
 * Register one LED with the LED class subsystem.
 *
 * Returns -EEXIST if the LED is already registered, -EINVAL if no
 * default trigger was given, or the led_classdev_register() error.
 * A non-NULL led->wl marks the LED as registered.
 *
 * Fix: strncpy() does not NUL-terminate when the source fills the
 * destination, so led->name could end up unterminated and later be
 * read past its end via led_dev.name; terminate it explicitly.
 */
static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
			    const char *name, const char *default_trigger,
			    u8 led_index, bool activelow)
{
	int err;

	if (led->wl)
		return -EEXIST;
	if (!default_trigger)
		return -EINVAL;
	led->wl = dev->wl;
	led->index = led_index;
	led->activelow = activelow;
	strncpy(led->name, name, sizeof(led->name) - 1);
	led->name[sizeof(led->name) - 1] = '\0';
	atomic_set(&led->state, 0);

	led->led_dev.name = led->name;
	led->led_dev.default_trigger = default_trigger;
	led->led_dev.brightness_set = b43_led_brightness_set;

	err = led_classdev_register(dev->sdev->dev, &led->led_dev);
	if (err) {
		b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
		led->wl = NULL;		/* mark as unregistered again */
		return err;
	}

	return 0;
}
/* Unregister one LED from the LED class subsystem, if registered. */
static void b43_unregister_led(struct b43_led *led)
{
	if (led->wl) {
		led_classdev_unregister(&led->led_dev);
		led->wl = NULL;
	}
}
/*
 * Register the LED(s) that correspond to a b43-specific behaviour value,
 * wiring each one to the matching generic mac80211 LED trigger.
 */
static void b43_map_led(struct b43_wldev *dev,
			u8 led_index,
			enum b43_led_behaviour behaviour,
			bool activelow)
{
	struct ieee80211_hw *hw = dev->wl->hw;
	char name[B43_LED_MAX_NAME_LEN + 1];

	/* Map the b43 specific LED behaviour value to the
	 * generic LED triggers. */
	switch (behaviour) {
	case B43_LED_INACTIVE:
	case B43_LED_OFF:
	case B43_LED_ON:
		/* no trigger mapping for statically on/off LEDs */
		break;
	case B43_LED_ACTIVITY:
	case B43_LED_TRANSFER:
	case B43_LED_APTRANSFER:
		/* activity LEDs map to both the TX and the RX trigger */
		snprintf(name, sizeof(name),
			 "b43-%s::tx", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_tx, name,
				 ieee80211_get_tx_led_name(hw),
				 led_index, activelow);
		snprintf(name, sizeof(name),
			 "b43-%s::rx", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_rx, name,
				 ieee80211_get_rx_led_name(hw),
				 led_index, activelow);
		break;
	case B43_LED_RADIO_ALL:
	case B43_LED_RADIO_A:
	case B43_LED_RADIO_B:
	case B43_LED_MODE_BG:
		snprintf(name, sizeof(name),
			 "b43-%s::radio", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_radio, name,
				 ieee80211_get_radio_led_name(hw),
				 led_index, activelow);
		break;
	case B43_LED_WEIRD:
	case B43_LED_ASSOC:
		snprintf(name, sizeof(name),
			 "b43-%s::assoc", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_assoc, name,
				 ieee80211_get_assoc_led_name(hw),
				 led_index, activelow);
		break;
	default:
		b43warn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n",
			behaviour);
		break;
	}
}
/* Look up behaviour and polarity for one LED from the SPROM GPIO
 * bytes, falling back to per-index hardcoded defaults when the SPROM
 * entry is unprogrammed (0xFF). */
static void b43_led_get_sprominfo(struct b43_wldev *dev,
				  unsigned int led_index,
				  enum b43_led_behaviour *behaviour,
				  bool *activelow)
{
	struct ssb_bus *bus = dev->sdev->bus;
	u8 sprom[4];

	sprom[0] = bus->sprom.gpio0;
	sprom[1] = bus->sprom.gpio1;
	sprom[2] = bus->sprom.gpio2;
	sprom[3] = bus->sprom.gpio3;

	if (sprom[led_index] != 0xFF) {
		/* SPROM carries behaviour and polarity directly. */
		*behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
		*activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
		return;
	}

	/* No LED information in the SPROM for this LED;
	 * use hardcoded defaults. */
	*activelow = 0;
	switch (led_index) {
	case 0:
		*behaviour = B43_LED_ACTIVITY;
		*activelow = 1;
		if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
			*behaviour = B43_LED_RADIO_ALL;
		break;
	case 1:
		*behaviour = B43_LED_RADIO_B;
		if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
			*behaviour = B43_LED_ASSOC;
		break;
	case 2:
		*behaviour = B43_LED_RADIO_A;
		break;
	case 3:
		*behaviour = B43_LED_OFF;
		break;
	default:
		*behaviour = B43_LED_OFF;
		B43_WARN_ON(1);
		break;
	}
}
void b43_leds_init(struct b43_wldev *dev)
{
struct b43_led *led;
unsigned int i;
enum b43_led_behaviour behaviour;
bool activelow;
/* Sync the RF-kill LED state (if we have one) with radio and switch states. */
led = &dev->wl->leds.led_radio;
if (led->wl) {
if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
b43_led_turn_on(dev, led->index, led->activelow);
led->hw_state = 1;
atomic_set(&led->state, 1);
} else {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = 0;
atomic_set(&led->state, 0);
}
}
/* Initialize TX/RX/ASSOC leds */
led = &dev->wl->leds.led_tx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = 0;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_rx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = 0;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_assoc;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = 0;
atomic_set(&led->state, 0);
}
/* Initialize other LED states. */
for (i = 0; i < B43_MAX_NR_LEDS; i++) {
b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
switch (behaviour) {
case B43_LED_OFF:
b43_led_turn_off(dev, i, activelow);
break;
case B43_LED_ON:
b43_led_turn_on(dev, i, activelow);
break;
default:
/* Leave others as-is. */
break;
}
}
dev->wl->leds.stop = 0;
}
void b43_leds_exit(struct b43_wldev *dev)
{
struct b43_leds *leds = &dev->wl->leds;
b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow);
b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow);
b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow);
b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow);
}
/* Block further LED updates, then wait for a pending update to finish. */
void b43_leds_stop(struct b43_wldev *dev)
{
	dev->wl->leds.stop = 1;
	cancel_work_sync(&dev->wl->leds.work);
}
/* Register every LED described by the SPROM with the LED subsystem. */
void b43_leds_register(struct b43_wldev *dev)
{
	unsigned int idx;
	enum b43_led_behaviour behaviour;
	bool activelow;

	INIT_WORK(&dev->wl->leds.work, b43_leds_work);

	for (idx = 0; idx < B43_MAX_NR_LEDS; idx++) {
		b43_led_get_sprominfo(dev, idx, &behaviour, &activelow);
		b43_map_led(dev, idx, behaviour, activelow);
	}
}
/* Tear down every LED classdev that was registered for this wl. */
void b43_leds_unregister(struct b43_wl *wl)
{
	b43_unregister_led(&wl->leds.led_tx);
	b43_unregister_led(&wl->leds.led_rx);
	b43_unregister_led(&wl->leds.led_assoc);
	b43_unregister_led(&wl->leds.led_radio);
}
| gpl-2.0 |
ED300/android_kernel_wingtech_msm8916 | net/mac80211/debugfs.c | 2318 | 10593 |
/*
* mac80211 debugfs for wireless PHYs
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
*
* GPLv2
*
*/
#include <linux/debugfs.h>
#include <linux/rtnetlink.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "debugfs.h"
#define DEBUGFS_FORMAT_BUFFER_SIZE 100

/*
 * printf-format a value into a stack buffer and copy it to user space,
 * honoring the debugfs read offset.  Shared helper for all the simple
 * read-only debugfs files below.
 */
int mac80211_format_buffer(char __user *userbuf, size_t count,
			   loff_t *ppos, char *fmt, ...)
{
	va_list args;
	char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
	int res;

	va_start(args, fmt);
	res = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
/* Generate a read() handler that formats a single value from
 * struct ieee80211_local using the given printf format. */
#define DEBUGFS_READONLY_FILE_FN(name, fmt, value...)			\
static ssize_t name## _read(struct file *file, char __user *userbuf,	\
			    size_t count, loff_t *ppos)			\
{									\
	struct ieee80211_local *local = file->private_data;		\
									\
	return mac80211_format_buffer(userbuf, count, ppos,		\
				      fmt "\n", ##value);		\
}

/* Generate the matching read-only file_operations for name##_read(). */
#define DEBUGFS_READONLY_FILE_OPS(name)					\
static const struct file_operations name## _ops = {			\
	.read = name## _read,						\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

/* Convenience: generate both the handler and the file_operations. */
#define DEBUGFS_READONLY_FILE(name, fmt, value...)			\
	DEBUGFS_READONLY_FILE_FN(name, fmt, value)			\
	DEBUGFS_READONLY_FILE_OPS(name)

/* Create a debugfs file in the phy directory (0400 = root read-only). */
#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, 0400, phyd, local, &name## _ops);

/* Same, but with an explicit mode (e.g. write-only trigger files). */
#define DEBUGFS_ADD_MODE(name, mode)					\
	debugfs_create_file(#name, mode, phyd, local, &name## _ops);
/* Simple read-only files exposing single driver values. */
DEBUGFS_READONLY_FILE(user_power, "%d",
		      local->user_power_level);
DEBUGFS_READONLY_FILE(power, "%d",
		      local->hw.conf.power_level);
DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
		      local->total_ps_buffered);
DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
		      local->wep_iv & 0xffffff);
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
	local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
#ifdef CONFIG_PM
/*
 * Write handler for the "reset" debugfs file: force a suspend/resume
 * cycle of the hardware.  The written data itself is ignored.
 */
static ssize_t reset_write(struct file *file, const char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;

	rtnl_lock();
	__ieee80211_suspend(&local->hw, NULL);
	__ieee80211_resume(&local->hw);
	rtnl_unlock();

	return count;
}

static const struct file_operations reset_ops = {
	.write = reset_write,
	.open = simple_open,
	.llseek = noop_llseek,
};
#endif
/*
 * Read handler for the "hwflags" debugfs file: print the raw hw.flags
 * word followed by one line per set IEEE80211_HW_* flag.
 *
 * Fix: return -ENOMEM instead of 0 (which reads as EOF) when the
 * temporary buffer cannot be allocated.
 */
static ssize_t hwflags_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	int mxln = 500;
	ssize_t rv;
	char *buf = kzalloc(mxln, GFP_KERNEL);
	int sf = 0; /* how many written so far */

	if (!buf)
		return -ENOMEM;

	sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
		sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
	if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
		sf += snprintf(buf + sf, mxln - sf,
			       "HOST_BCAST_PS_BUFFERING\n");
	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
		sf += snprintf(buf + sf, mxln - sf,
			       "2GHZ_SHORT_SLOT_INCAPABLE\n");
	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
		sf += snprintf(buf + sf, mxln - sf,
			       "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
	if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
		sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_BEFORE_ASSOC\n");
	if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
		sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
		sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
		sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
	if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
		sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
		sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
	if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
		sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
		sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
	if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
		sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");

	/* strlen(buf), not sf: snprintf() may return would-be lengths
	 * larger than what actually fit into the buffer */
	rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return rv;
}
/*
 * Read handler for the "queues" debugfs file: one line per hardware
 * queue with its stop-reason bitmask and pending-frame count.
 */
static ssize_t queues_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	unsigned long flags;
	/* 20 bytes per line is enough for "NN: 0xXXXXXXXX/NNN\n" */
	char buf[IEEE80211_MAX_QUEUES * 20];
	int q, res = 0;

	/* take the lock so stop reasons and queue lengths are consistent */
	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (q = 0; q < local->hw.queues; q++)
		res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q,
				local->queue_stop_reasons[q],
				skb_queue_len(&local->pending[q]));
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, res);
}
/* file_operations for the open-coded hwflags_read()/queues_read() above */
DEBUGFS_READONLY_FILE_OPS(hwflags);
DEBUGFS_READONLY_FILE_OPS(queues);

/* statistics stuff */
/*
 * Fetch the low-level stats from the driver (under RTNL) and let the
 * given callback format one counter into a small buffer, which is then
 * copied to user space.  Shared by all stats_dot11* read handlers.
 */
static ssize_t format_devstat_counter(struct ieee80211_local *local,
				      char __user *userbuf,
				      size_t count, loff_t *ppos,
				      int (*printvalue)(struct ieee80211_low_level_stats *stats, char *buf,
							int buflen))
{
	struct ieee80211_low_level_stats stats;
	char buf[20];
	int res;

	rtnl_lock();
	res = drv_get_stats(local, &stats);
	rtnl_unlock();
	if (res)
		return res;

	res = printvalue(&stats, buf, sizeof(buf));
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
/* Generate a printer, a read() handler and file_operations for one
 * low-level device statistics counter. */
#define DEBUGFS_DEVSTATS_FILE(name)					\
static int print_devstats_##name(struct ieee80211_low_level_stats *stats,\
				 char *buf, int buflen)			\
{									\
	return scnprintf(buf, buflen, "%u\n", stats->name);		\
}									\
static ssize_t stats_ ##name## _read(struct file *file,			\
				     char __user *userbuf,		\
				     size_t count, loff_t *ppos)	\
{									\
	return format_devstat_counter(file->private_data,		\
				      userbuf,				\
				      count,				\
				      ppos,				\
				      print_devstats_##name);		\
}									\
									\
static const struct file_operations stats_ ##name## _ops = {		\
	.read = stats_ ##name## _read,					\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

/* Expose a plain u32 counter directly via debugfs_create_u32(). */
#define DEBUGFS_STATS_ADD(name, field)					\
	debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
/* Expose a driver-provided counter via its stats_##name##_ops. */
#define DEBUGFS_DEVSTATS_ADD(name)					\
	debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
/* Instantiate handlers for the four dot11 low-level counters. */
DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount);
DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount);
DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount);
/*
 * Populate the per-phy debugfs directory: simple value files, the keys
 * subdirectory and a "statistics" subdirectory full of counters.
 * All creation failures are silently tolerated, as usual with debugfs.
 */
void debugfs_hw_add(struct ieee80211_local *local)
{
	struct dentry *phyd = local->hw.wiphy->debugfsdir;
	struct dentry *statsd;

	if (!phyd)
		return;

	local->debugfs.keys = debugfs_create_dir("keys", phyd);

	DEBUGFS_ADD(total_ps_buffered);
	DEBUGFS_ADD(wep_iv);
	DEBUGFS_ADD(queues);
#ifdef CONFIG_PM
	DEBUGFS_ADD_MODE(reset, 0200);
#endif
	DEBUGFS_ADD(hwflags);
	DEBUGFS_ADD(user_power);
	DEBUGFS_ADD(power);

	statsd = debugfs_create_dir("statistics", phyd);

	/* if the dir failed, don't put all the other things into the root! */
	if (!statsd)
		return;

	DEBUGFS_STATS_ADD(transmitted_fragment_count,
		local->dot11TransmittedFragmentCount);
	DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
		local->dot11MulticastTransmittedFrameCount);
	DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
	DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
	DEBUGFS_STATS_ADD(multiple_retry_count,
		local->dot11MultipleRetryCount);
	DEBUGFS_STATS_ADD(frame_duplicate_count,
		local->dot11FrameDuplicateCount);
	DEBUGFS_STATS_ADD(received_fragment_count,
		local->dot11ReceivedFragmentCount);
	DEBUGFS_STATS_ADD(multicast_received_frame_count,
		local->dot11MulticastReceivedFrameCount);
	DEBUGFS_STATS_ADD(transmitted_frame_count,
		local->dot11TransmittedFrameCount);
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
	/* extra software path counters, only with debug counters enabled */
	DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
	DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
	DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted,
		local->tx_handlers_drop_unencrypted);
	DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
		local->tx_handlers_drop_fragment);
	DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
		local->tx_handlers_drop_wep);
	DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
		local->tx_handlers_drop_not_assoc);
	DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
		local->tx_handlers_drop_unauth_port);
	DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
	DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
	DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
		local->rx_handlers_drop_nullfunc);
	DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
		local->rx_handlers_drop_defrag);
	DEBUGFS_STATS_ADD(rx_handlers_drop_short,
		local->rx_handlers_drop_short);
	DEBUGFS_STATS_ADD(tx_expand_skb_head,
		local->tx_expand_skb_head);
	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
		local->tx_expand_skb_head_cloned);
	DEBUGFS_STATS_ADD(rx_expand_skb_head,
		local->rx_expand_skb_head);
	DEBUGFS_STATS_ADD(rx_expand_skb_head2,
		local->rx_expand_skb_head2);
	DEBUGFS_STATS_ADD(rx_handlers_fragments,
		local->rx_handlers_fragments);
	DEBUGFS_STATS_ADD(tx_status_drop,
		local->tx_status_drop);
#endif
	DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
	DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
	DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount);
	DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount);
}
| gpl-2.0 |
SaatvikShukla/android_kernel_sony_msm8974-GPE | arch/s390/mm/fault.c | 2318 | 16621 | /*
* arch/s390/mm/fault.c
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"
/* Address mask and PSW subcode differ between 31-bit and 64-bit mode. */
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */

/* Internal fault results, in addition to the generic VM_FAULT_* codes. */
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000

static unsigned long store_indication;

/* Enable store-indication bits if both required facilities are present. */
void fault_init(void)
{
	if (test_facility(2) && test_facility(75))
		store_indication = 0xc00;
}
/* Give kprobes a chance to handle a kernel-mode fault.
 * Returns 1 when a kprobe fault handler consumed the fault. */
static inline int notify_page_fault(struct pt_regs *regs)
{
	int handled = 0;

	/* kprobe_running() needs smp_processor_id(), so disable preemption */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			handled = 1;
		preempt_enable();
	}
	return handled;
}
/*
* Unlock any spinlocks which will prevent us from getting the
* message out.
*/
/*
 * Unlock any spinlocks which will prevent us from getting the message
 * out (called with 1 around an oops, then with 0 afterwards).
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}
/*
* Returns the address space associated with the fault.
* Returns 0 for kernel space and 1 for user space.
*/
/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(unsigned long trans_exc_code)
{
	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code &= 3;
	if (trans_exc_code == 2)
		/* Access via secondary space, set_fs setting decides */
		return current->thread.mm_segment.ar4;
	/* NOTE(review): "user_mode" here is the s390 addressing-mode
	 * global (HOME_SPACE_MODE etc.), not the ptrace user_mode(regs)
	 * predicate used elsewhere in this file. */
	if (user_mode == HOME_SPACE_MODE)
		/* User space if the access has been done via home space. */
		return trans_exc_code == 3;
	/*
	 * If the user space is not the home space the kernel runs in home
	 * space. Access via secondary space has already been covered,
	 * access via primary space or access register is from user space
	 * and access via home space is from the kernel.
	 */
	return trans_exc_code != 3;
}
/*
 * Print a rate-limited diagnostic for an unhandled user-space fault.
 * Stays silent unless show_unhandled_signals is set (faults of PID 1
 * are always reported) and the signal is really unhandled.
 */
static inline void report_user_fault(struct pt_regs *regs, long signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
	       regs->int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk(KERN_CONT "\n");
	printk(KERN_ALERT "failing address: %lX\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK);
	show_regs(regs);
}
/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 *
 * Fix: zero the whole siginfo before filling it in.  force_sig_info()
 * copies the structure to user space, so any uninitialized fields or
 * padding would leak kernel stack contents (the same class of bug the
 * tree-wide clear_siginfo() conversion later addressed).
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV);
	memset(&si, 0, sizeof(si));
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}
/*
 * Handle a kernel-mode fault with no usable user context: either branch
 * to a registered exception fixup, or print a diagnostic and die.
 */
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		/* resume at the fixup handler instead of the faulting insn */
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs->int_parm_long))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);

	die(regs, "Oops");
	do_exit(SIGKILL);
}
static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	/* kernel mode: handle like any other kernel fault */
	do_no_context(regs);
}
/*
 * Deliver SIGBUS (BUS_ADRERR) for the failing address to the current
 * task, regardless of whether the fault happened in kernel or user
 * mode.
 */
static noinline void do_sigbus(struct pt_regs *regs)
{
	struct siginfo si;

	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, current);
}
/*
 * Translate a non-zero fault code returned by do_exception() into the
 * appropriate action: SIGSEGV for user-mode bad accesses, exception
 * fixup or oops for kernel mode, OOM handling, or SIGBUS.
 */
static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (regs->psw.mask & PSW_MASK_PSTATE) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
		/* fallthrough: kernel-mode bad access handled like bad context */
	case VM_FAULT_BADCONTEXT:
		do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* interruption code (int_code):
* 04 Protection -> Write-Protection (suprression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
/*
 * Core page fault handler.
 *
 * @regs:   interrupt register state; int_parm_long carries the
 *          translation-exception code including the failing address
 * @access: VM_READ/VM_WRITE/VM_EXEC mask the faulting access needs
 *
 * Returns 0 on success, otherwise one of the VM_FAULT_* codes for
 * do_fault_error() (including the pseudo codes VM_FAULT_BADCONTEXT,
 * VM_FAULT_BADMAP and VM_FAULT_BADACCESS).
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	/* Give kprobes et al. a chance to swallow the fault first. */
	if (notify_page_fault(regs))
		return 0;

	tsk = current;
	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY;
	/* 0x400 in the exception code indicates a store access. */
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	/* Under SIE, translate the guest address through the gmap first. */
	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
		address = __gmap_fault(address,
				     (struct gmap *) S390_lowcore.gmap);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			/*
			 * NOTE(review): handle_mm_fault() presumably dropped
			 * mmap_sem on VM_FAULT_RETRY, hence the re-acquire
			 * here — confirm against mm/memory.c of this tree.
			 */
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	/*
	 * The instruction that caused the program check will
	 * be repeated. Don't signal single step via SIGTRAP.
	 */
	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
/*
 * Entry point for protection exceptions (write to a write-protected
 * page or a low-address protection hit).
 */
void __kprobes do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/* Protection exception is suppressing, decrement psw address. */
	regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	/* Protection faults are always write faults. */
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
/*
 * Entry point for DAT (dynamic address translation) exceptions, i.e.
 * ordinary page-not-present faults.  Any access type may have faulted.
 */
void __kprobes do_dat_exception(struct pt_regs *regs)
{
	int fault;

	fault = do_exception(regs, VM_READ | VM_EXEC | VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
#ifdef CONFIG_64BIT
/*
 * Entry point for ASCE (address-space-control-element) exceptions.
 * If the failing address is covered by a VMA, reload the mm context
 * via update_mm(); otherwise treat it as a SIGSEGV (user mode) or a
 * kernel fault.
 */
void __kprobes do_asce_exception(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long;
	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
	up_read(&mm->mmap_sem);

	if (vma) {
		update_mm(mm, current);
		return;
	}

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		do_sigsegv(regs, SEGV_MAPERR);
		return;
	}

no_context:
	do_no_context(regs);
}
#endif
/*
 * Resolve a fault on a user address on behalf of a kernel access
 * (uaccess fault handling).  Builds a pseudo pt_regs describing the
 * access and feeds it to do_exception().
 *
 * Returns 0 on success, -EFAULT if the fault could not be resolved.
 */
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
	struct pt_regs regs;
	int access, fault;

	/* Fake a kernel-mode PSW; keep I/O/ext interrupts enabled if they are. */
	regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
	if (!irqs_disabled())
		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
	regs.psw.addr = (unsigned long) __builtin_return_address(0);
	regs.psw.addr |= PSW_ADDR_AMODE;
	regs.int_code = pgm_int_code;
	/*
	 * The low bits of the exception code select the address space;
	 * "| 2" makes user_space_fault() treat this as a user-space
	 * access — presumably the secondary-space encoding, confirm
	 * against user_space_fault().
	 */
	regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
	access = write ? VM_WRITE : VM_READ;
	/* Fix mis-encoded "&regs" (was garbled to the '®' character). */
	fault = do_exception(&regs, access);
	if (unlikely(fault)) {
		if (fault & VM_FAULT_OOM)
			return -EFAULT;
		else if (fault & VM_FAULT_SIGBUS)
			do_sigbus(&regs);
	}
	return fault ? -EFAULT : 0;
}
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page faults routines.
*/
/* Set by the "nopfault" kernel parameter to disable pfault handling. */
static int pfault_disable;

/* Kernel command line handler: "nopfault" disables pseudo page faults. */
static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);
/*
 * Parameter block for the DIAG 0x258 (pfault) hypervisor call.
 * Layout is defined by the z/VM interface, hence packed and 8-byte
 * aligned.
 */
struct pfault_refbk {
	u16 refdiagc;	/* diagnose code, always 0x258 */
	u16 reffcode;	/* function: 0 = init (TOKEN), 1 = cancel */
	u16 refdwlen;	/* block length in doublewords */
	u16 refversn;	/* interface version */
	u64 refgaddr;	/* address of the per-task token */
	u64 refselmk;	/* select mask */
	u64 refcmpmk;	/* compare mask */
	u64 reserved;
} __attribute__ ((packed, aligned(8)));
/*
 * Enable pfault pseudo page faults for this CPU via DIAG 0x258.
 * The per-task pid (stored at __LC_CURRENT_PID) is used as the token
 * that the completion interrupt hands back.
 *
 * Returns 0 on success, a non-zero value if the diagnose is not
 * available (the exception-table fixup forces rc = 8) or when pfault
 * handling is disabled.
 */
int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	/* A program check while issuing the diagnose lands at 1: (rc = 8). */
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}
/*
 * Disable (cancel) pfault pseudo page faults for this CPU via
 * DIAG 0x258 function code 1.  A program check during the diagnose is
 * silently ignored via the exception-table fixup.
 */
void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}
/* Protects pfault_list and the tasks' pfault_wait state. */
static DEFINE_SPINLOCK(pfault_lock);
/* Tasks currently sleeping on an outstanding pfault. */
static LIST_HEAD(pfault_list);

/*
 * External interrupt handler for pfault initial and completion
 * interrupts.  The initial interrupt (signal bit clear) puts the
 * faulting task to sleep; the completion interrupt (signal bit set)
 * wakes it up again.  pfault_wait encodes the hand-shake state:
 *   0  = no pfault pending
 *   1  = initial interrupt seen, task sleeping
 *  -1  = completion arrived before the initial interrupt
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
	if (subcode & 0x0080) {
		/* Get the token (= pid of the affected task). */
		pid = sizeof(void *) == 4 ? param32 : param64;
		rcu_read_lock();
		tsk = find_task_by_pid_ns(pid, &init_pid_ns);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return;
	} else {
		tsk = current;
	}
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
		put_task_struct(tsk);
	} else {
		/* signal bit not set -> a real page is missing. */
		if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep. */
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
	spin_unlock(&pfault_lock);
}
/*
 * CPU hotplug callback: when a CPU dies, wake up every task that is
 * still waiting on a pfault completion, since the completion interrupt
 * for the dead CPU will never arrive.
 */
static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
/*
 * Register the pfault external interrupt handler (code 0x2603) and
 * enable pfault on the boot CPU.  If the hypervisor does not support
 * pfault, unregister again and mark pfault disabled.
 */
static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_interrupt(0x2603, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	service_subclass_irq_register();
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
| gpl-2.0 |
bmc08gt/kernel_samsung_exynos7420 | drivers/staging/usbip/stub_rx.c | 2318 | 15579 | /*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "usbip_common.h"
#include "stub.h"
/* Return non-zero if the control URB is a CLEAR_FEATURE(ENDPOINT_HALT). */
static int is_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;

	if (req->bRequest != USB_REQ_CLEAR_FEATURE)
		return 0;
	if (req->bRequestType != USB_RECIP_ENDPOINT)
		return 0;
	return req->wValue == USB_ENDPOINT_HALT;
}
/* Return non-zero if the control URB is a SET_INTERFACE request. */
static int is_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;

	if (req->bRequest != USB_REQ_SET_INTERFACE)
		return 0;
	return req->bRequestType == USB_RECIP_INTERFACE;
}
/* Return non-zero if the control URB is a SET_CONFIGURATION request. */
static int is_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;

	if (req->bRequest != USB_REQ_SET_CONFIGURATION)
		return 0;
	return req->bRequestType == USB_RECIP_DEVICE;
}
/*
 * Return non-zero if the control URB is a hub-style port reset:
 * SET_FEATURE(PORT_RESET) addressed to a port.
 */
static int is_reset_device_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;
	__u16 value = le16_to_cpu(req->wValue);
	__u16 index = le16_to_cpu(req->wIndex);

	if (req->bRequest != USB_REQ_SET_FEATURE)
		return 0;
	if (req->bRequestType != USB_RT_PORT)
		return 0;
	if (value != USB_PORT_FEAT_RESET)
		return 0;

	usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
	return 1;
}
/*
 * Execute a CLEAR_FEATURE(ENDPOINT_HALT) locally with usb_clear_halt()
 * instead of forwarding the raw control transfer.  Returns the
 * usb_clear_halt() result.
 */
static int tweak_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req =
		(struct usb_ctrlrequest *) urb->setup_packet;
	int target_endp;
	int target_pipe;
	int ret;

	/*
	 * The stalled endpoint is specified in the wIndex value. The endpoint
	 * of the urb is the target of this clear_halt request (i.e., control
	 * endpoint).
	 */
	target_endp = le16_to_cpu(req->wIndex) & 0x000f;

	/* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80. */
	if (le16_to_cpu(req->wIndex) & 0x0080)
		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
	else
		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

	ret = usb_clear_halt(urb->dev, target_pipe);
	if (ret < 0)
		dev_err(&urb->dev->dev, "usb_clear_halt error: devnum %d endp "
			"%d ret %d\n", urb->dev->devnum, target_endp, ret);
	else
		dev_info(&urb->dev->dev, "usb_clear_halt done: devnum %d endp "
			 "%d\n", urb->dev->devnum, target_endp);

	return ret;
}
/*
 * Execute a SET_INTERFACE request locally via usb_set_interface() so
 * the host-side USB core updates its endpoint state, instead of
 * forwarding the raw control transfer.  Returns the
 * usb_set_interface() result.
 */
static int tweak_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 alternate;
	__u16 interface;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	alternate = le16_to_cpu(req->wValue);
	interface = le16_to_cpu(req->wIndex);

	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
			  interface, alternate);

	ret = usb_set_interface(urb->dev, interface, alternate);
	if (ret < 0)
		dev_err(&urb->dev->dev, "usb_set_interface error: inf %u alt "
			"%u ret %d\n", interface, alternate, ret);
	else
		dev_info(&urb->dev->dev, "usb_set_interface done: inf %u alt "
			 "%u\n", interface, alternate);
	return ret;
}
/*
 * Intercept a SET_CONFIGURATION request and deliberately skip it
 * (see the rationale below).  Always returns 0.
 */
static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 config;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	config = le16_to_cpu(req->wValue);

	/*
	 * I have never seen a multi-config device. Very rare.
	 * For most devices, this will be called to choose a default
	 * configuration only once in an initialization phase.
	 *
	 * set_configuration may change a device configuration and its device
	 * drivers will be unbound and assigned for a new device configuration.
	 * This means this usbip driver will be also unbound when called, then
	 * eventually reassigned to the device as far as driver matching
	 * condition is kept.
	 *
	 * Unfortunately, an existing usbip connection will be dropped
	 * due to this driver unbinding. So, skip here.
	 * A user may need to set a special configuration value before
	 * exporting the device.
	 */
	dev_info(&urb->dev->dev, "usb_set_configuration %d to %s... skip!\n",
		 config, dev_name(&urb->dev->dev));

	return 0;
}
/*
 * Execute a port-reset request by synchronously resetting the exported
 * device with usb_reset_device().  Always returns 0, even when the
 * device lock could not be taken.
 */
static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	/*
	 * With the implementation of pre_reset and post_reset the driver no
	 * longer unbinds. This allows the use of synchronous reset.
	 */
	if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}
/*
 * clear_halt, set_interface, set_configuration and port reset require
 * special handling on the host side rather than being forwarded
 * verbatim; dispatch to the matching tweak function.  Non-control and
 * unrecognized URBs are left alone.
 */
static void tweak_special_requests(struct urb *urb)
{
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	if (is_clear_halt_cmd(urb)) {
		tweak_clear_halt_cmd(urb);
		return;
	}
	if (is_set_interface_cmd(urb)) {
		tweak_set_interface_cmd(urb);
		return;
	}
	if (is_set_configuration_cmd(urb)) {
		tweak_set_configuration_cmd(urb);
		return;
	}
	if (is_reset_device_cmd(urb)) {
		tweak_reset_device_cmd(urb);
		return;
	}

	usbip_dbg_stub_rx("no need to tweak\n");
}
/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process coming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 *
 * Always returns 0.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* Look for the still-pending submission with the target seqnum. */
	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
			 priv->urb);

		/*
		 * This matched urb is not completed yet (i.e., be in
		 * flight in usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * In the case that unlinking flag is on, prev->seqnum
		 * is changed from the seqnum of the cancelling urb to
		 * the seqnum of the unlink request. This will be used
		 * to make the result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is now out of spinlocking to avoid
		 * spinlock recursion since stub_complete() is
		 * sometimes called in this context but not in the
		 * interrupt context. If stub_complete() is executed
		 * before we call usb_unlink_urb(), usb_unlink_urb()
		 * will return an error value. In this case, stub_tx
		 * will return the result pdu of this unlink request
		 * though submission is completed and actual unlinking
		 * is not executed. OK?
		 */
		/* In the above case, urb->status is not -ECONNRESET,
		 * so a driver in a client host will know the failure
		 * of the unlink request ?
		 */
		ret = usb_unlink_urb(priv->urb);
		if (ret != -EINPROGRESS)
			dev_err(&priv->urb->dev->dev,
				"failed to unlink a urb %p, ret %d\n",
				priv->urb, ret);

		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in priv_init queue. It was
	 * already completed and its results is/was going to be sent by a
	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
	 * return the completeness of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
/*
 * A request is valid only when it addresses this exported device and
 * the device is currently in the SDEV_ST_USED state.  Returns 1 if
 * valid, 0 otherwise.
 */
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	int valid;

	if (pdu->base.devid != sdev->devid)
		return 0;

	spin_lock_irq(&ud->lock);
	valid = (ud->status == SDEV_ST_USED);
	spin_unlock_irq(&ud->lock);

	return valid;
}
/*
 * Allocate a stub_priv for an incoming SUBMIT pdu and queue it on
 * sdev->priv_init under priv_lock.  On allocation failure an
 * SDEV_EVENT_ERROR_MALLOC event is raised and NULL is returned.
 */
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* GFP_ATOMIC: allocation happens under the spinlock. */
	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->interface->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}
/*
 * Map an endpoint number/direction from the pdu to the host-side pipe
 * value for URB submission.  BUGs if the endpoint does not exist;
 * returns 0 for an endpoint whose transfer type matches none of the
 * four standard types (not expected to happen).
 */
static int get_pipe(struct stub_device *sdev, int epnum, int dir)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd;
	int out = (dir == USBIP_DIR_OUT);

	ep = (dir == USBIP_DIR_IN) ? udev->ep_in[epnum & 0x7f] :
				     udev->ep_out[epnum & 0x7f];
	if (!ep) {
		dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
			epnum);
		BUG();
	}

	epd = &ep->desc;
	if (usb_endpoint_xfer_control(epd))
		return out ? usb_sndctrlpipe(udev, epnum)
			   : usb_rcvctrlpipe(udev, epnum);

	if (usb_endpoint_xfer_bulk(epd))
		return out ? usb_sndbulkpipe(udev, epnum)
			   : usb_rcvbulkpipe(udev, epnum);

	if (usb_endpoint_xfer_int(epd))
		return out ? usb_sndintpipe(udev, epnum)
			   : usb_rcvintpipe(udev, epnum);

	if (usb_endpoint_xfer_isoc(epd))
		return out ? usb_sndisocpipe(udev, epnum)
			   : usb_rcvisocpipe(udev, epnum);

	/* NOT REACHED */
	dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
	return 0;
}
/*
 * Sanitize urb->transfer_flags received from the (untrusted) client:
 * keep only the flags that are legal for the endpoint's transfer type
 * and direction, mirroring the checks usb_submit_urb() enforces.
 * Silently returns without touching the urb when it is malformed or
 * the device/endpoint state cannot be determined.
 */
static void masking_bogus_flags(struct urb *urb)
{
	int				xfertype;
	struct usb_device		*dev;
	struct usb_host_endpoint	*ep;
	int				is_out;
	unsigned int	allowed;

	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		/* A control transfer with no data stage counts as OUT. */
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;
}
/*
 * Handle a CMD_SUBMIT pdu: build an URB from the pdu (transfer buffer,
 * setup packet, iso descriptors) and submit it to the exported device.
 *
 * NOTE(review): the early returns on allocation/receive failure do not
 * free priv/urb directly; presumably the SDEV_EVENT_ERROR_* event
 * handler tears down everything reachable from sdev->priv_init (priv
 * is linked there by stub_priv_alloc()) — confirm in the stub event
 * handling code.
 */
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	int ret;
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);

	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	/* setup a urb */
	if (usb_pipeisoc(pipe))
		priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
					  GFP_KERNEL);
	else
		priv->urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!priv->urb) {
		dev_err(&sdev->interface->dev, "malloc urb\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* allocate urb transfer buffer, if needed */
	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
		priv->urb->transfer_buffer =
			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
				GFP_KERNEL);
		if (!priv->urb->transfer_buffer) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}
	}

	/* copy urb setup packet */
	priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
					  GFP_KERNEL);
	if (!priv->urb->setup_packet) {
		dev_err(&sdev->interface->dev, "allocate setup_packet\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* set other members from the base header of pdu */
	priv->urb->context                = (void *) priv;
	priv->urb->dev                    = udev;
	priv->urb->pipe                   = pipe;
	priv->urb->complete               = stub_complete;

	usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);

	/* Receive the data stage and iso descriptors from the socket. */
	if (usbip_recv_xbuff(ud, priv->urb) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urb) < 0)
		return;

	/* no need to submit an intercepted request, but harmless? */
	tweak_special_requests(priv->urb);

	masking_bogus_flags(priv->urb);
	/* urb is now ready to submit */
	ret = usb_submit_urb(priv->urb, GFP_KERNEL);

	if (ret == 0)
		usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
				  pdu->base.seqnum);
	else {
		dev_err(&sdev->interface->dev, "submit_urb error, %d\n", ret);
		usbip_dump_header(pdu);
		usbip_dump_urb(priv->urb);

		/*
		 * Pessimistic.
		 * This connection will be discarded.
		 */
		usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
	}

	usbip_dbg_stub_rx("Leave\n");
	return;
}
/*
 * Receive one pdu from the client over the tcp socket, validate it and
 * dispatch to the UNLINK or SUBMIT handler.  Any receive/validation
 * failure raises SDEV_EVENT_ERROR_TCP, which tears down the
 * connection.
 */
static void stub_rx_pdu(struct usbip_device *ud)
{
	int ret;
	struct usbip_header pdu;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct device *dev = &sdev->interface->dev;

	usbip_dbg_stub_rx("Enter\n");

	memset(&pdu, 0, sizeof(pdu));

	/* receive a pdu header */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu)) {
		dev_err(dev, "recv a header, %d\n", ret);
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	/* Header arrives in network byte order. */
	usbip_header_correct_endian(&pdu, 0);

	if (usbip_dbg_flag_stub_rx)
		usbip_dump_header(&pdu);

	if (!valid_request(sdev, &pdu)) {
		dev_err(dev, "recv invalid request\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	switch (pdu.base.command) {
	case USBIP_CMD_UNLINK:
		stub_recv_cmd_unlink(sdev, &pdu);
		break;

	case USBIP_CMD_SUBMIT:
		stub_recv_cmd_submit(sdev, &pdu);
		break;

	default:
		/* NOTREACHED */
		dev_err(dev, "unknown pdu\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		break;
	}
}
/*
 * Kernel thread main loop: keep receiving pdus until the thread is
 * asked to stop or a usbip event (error/teardown) is pending.
 */
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	for (;;) {
		if (kthread_should_stop() || usbip_event_happened(ud))
			break;
		stub_rx_pdu(ud);
	}

	return 0;
}
| gpl-2.0 |
rogersb11/android_kernel_samsung_smdk4412 | arch/powerpc/platforms/embedded6xx/flipper-pic.c | 2830 | 5500 | /*
* arch/powerpc/platforms/embedded6xx/flipper-pic.c
*
* Nintendo GameCube/Wii "Flipper" interrupt controller support.
* Copyright (C) 2004-2009 The GameCube Linux Team
* Copyright (C) 2007,2008,2009 Albert Herranz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*/
#define DRV_MODULE_NAME "flipper-pic"
#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/io.h>
#include "flipper-pic.h"
#define FLIPPER_NR_IRQS 32
/*
* Each interrupt has a corresponding bit in both
* the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers.
*
* Enabling/disabling an interrupt line involves setting/clearing
* the corresponding bit in IMR.
* Except for the RSW interrupt, all interrupts get deasserted automatically
* when the source deasserts the interrupt.
*/
#define FLIPPER_ICR 0x00
#define FLIPPER_ICR_RSS (1<<16) /* reset switch state */
#define FLIPPER_IMR 0x04
#define FLIPPER_RESET 0x24
/*
* IRQ chip hooks.
*
*/
/*
 * Mask the interrupt in IMR, then clear its cause bit in ICR.
 * The explicit ICR write is required at least for the RSW (reset
 * switch) interrupt, which does not deassert on its own.
 */
static void flipper_pic_mask_and_ack(struct irq_data *d)
{
	int irq = irqd_to_hwirq(d);
	void __iomem *io_base = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << irq;

	clrbits32(io_base + FLIPPER_IMR, mask);
	/* this is at least needed for RSW */
	out_be32(io_base + FLIPPER_ICR, mask);
}
/* Acknowledge an interrupt by writing its cause bit to ICR. */
static void flipper_pic_ack(struct irq_data *d)
{
	int irq = irqd_to_hwirq(d);
	void __iomem *io_base = irq_data_get_irq_chip_data(d);

	/* this is at least needed for RSW */
	out_be32(io_base + FLIPPER_ICR, 1 << irq);
}
/* Disable an interrupt line by clearing its bit in IMR. */
static void flipper_pic_mask(struct irq_data *d)
{
	int irq = irqd_to_hwirq(d);
	void __iomem *io_base = irq_data_get_irq_chip_data(d);

	clrbits32(io_base + FLIPPER_IMR, 1 << irq);
}
/* Enable an interrupt line by setting its bit in IMR. */
static void flipper_pic_unmask(struct irq_data *d)
{
	int irq = irqd_to_hwirq(d);
	void __iomem *io_base = irq_data_get_irq_chip_data(d);

	setbits32(io_base + FLIPPER_IMR, 1 << irq);
}
/* irq_chip operations for the Flipper interrupt controller. */
static struct irq_chip flipper_pic = {
	.name		= "flipper-pic",
	.irq_ack	= flipper_pic_ack,
	.irq_mask_ack	= flipper_pic_mask_and_ack,
	.irq_mask	= flipper_pic_mask,
	.irq_unmask	= flipper_pic_unmask,
};
/*
* IRQ host hooks.
*
*/
/* The single Flipper irq_host; host_data holds the mapped register base. */
static struct irq_host *flipper_irq_host;

/*
 * irq_host map hook: wire a virq to the flipper irq_chip with level
 * handling and stash the register base as chip data.
 */
static int flipper_pic_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq);
	return 0;
}
/* Match hook: this host claims every device node (single controller). */
static int flipper_pic_match(struct irq_host *h, struct device_node *np)
{
	return 1;
}
/* irq_host callbacks for the Flipper controller. */
static struct irq_host_ops flipper_irq_host_ops = {
	.map = flipper_pic_map,
	.match = flipper_pic_match,
};
/*
* Platform hooks.
*
*/
/* Silence the controller: mask every source, then ack any pending cause. */
static void __flipper_quiesce(void __iomem *io_base)
{
	/* mask and ack all IRQs */
	out_be32(io_base + FLIPPER_IMR, 0x00000000);
	out_be32(io_base + FLIPPER_ICR, 0xffffffff);
}
/*
 * Map the controller registers (found via the parent "flipper-pi"
 * node), quiesce the hardware and allocate the linear irq_host.
 *
 * Returns the irq_host, or NULL on any failure.
 *
 * Fixes over the previous version: the reference taken by
 * of_get_parent() is now dropped on every path (it leaked before),
 * ioremap() failure is checked instead of dereferencing NULL, and the
 * mapping is released when irq_alloc_host() fails.
 */
struct irq_host * __init flipper_pic_init(struct device_node *np)
{
	struct device_node *pi;
	struct irq_host *irq_host = NULL;
	struct resource res;
	void __iomem *io_base;
	int retval;

	pi = of_get_parent(np);
	if (!pi) {
		pr_err("no parent found\n");
		goto out;
	}
	if (!of_device_is_compatible(pi, "nintendo,flipper-pi")) {
		pr_err("unexpected parent compatible\n");
		goto out;
	}

	retval = of_address_to_resource(pi, 0, &res);
	if (retval) {
		pr_err("no io memory range found\n");
		goto out;
	}
	io_base = ioremap(res.start, resource_size(&res));
	if (!io_base) {
		pr_err("ioremap failed\n");
		goto out;
	}

	pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);

	__flipper_quiesce(io_base);

	irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS,
				  &flipper_irq_host_ops, -1);
	if (!irq_host) {
		pr_err("failed to allocate irq_host\n");
		iounmap(io_base);
		goto out;
	}

	irq_host->host_data = io_base;

out:
	of_node_put(pi);	/* of_node_put(NULL) is a no-op */
	return irq_host;
}
/*
 * Return the virq of the highest-priority pending, unmasked interrupt,
 * or NO_IRQ when none is pending.
 */
unsigned int flipper_pic_get_irq(void)
{
	void __iomem *io_base = flipper_irq_host->host_data;
	u32 pending;

	/* Pending causes, restricted to enabled sources. */
	pending = in_be32(io_base + FLIPPER_ICR) &
		  in_be32(io_base + FLIPPER_IMR);
	if (!pending)
		return NO_IRQ;	/* no more IRQs pending */

	return irq_linear_revmap(flipper_irq_host, __ffs(pending));
}
/*
* Probe function.
*
*/
/*
 * Find the flipper-pic device node, initialize the controller and make
 * it the default irq host.  BUGs if the node or the host is missing —
 * the platform cannot work without its interrupt controller.
 */
void __init flipper_pic_probe(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-pic");
	BUG_ON(!np);

	flipper_irq_host = flipper_pic_init(np);
	BUG_ON(!flipper_irq_host);

	irq_set_default_host(flipper_irq_host);

	of_node_put(np);
}
/*
* Misc functions related to the flipper chipset.
*
*/
/**
* flipper_quiesce() - quiesce flipper irq controller
*
* Mask and ack all interrupt sources.
*
*/
/* Public wrapper: mask and ack all interrupt sources. */
void flipper_quiesce(void)
{
	void __iomem *io_base = flipper_irq_host->host_data;

	__flipper_quiesce(io_base);
}
/*
* Resets the platform.
*/
/*
 * Reset the platform by writing 0 to the Flipper reset register.
 * Silently does nothing if the controller has not been mapped yet.
 */
void flipper_platform_reset(void)
{
	void __iomem *io_base;

	if (!flipper_irq_host || !flipper_irq_host->host_data)
		return;

	io_base = flipper_irq_host->host_data;
	out_8(io_base + FLIPPER_RESET, 0x00);
}
/*
* Returns non-zero if the reset button is pressed.
*/
/*
 * Return non-zero while the reset button is held down (the RSS bit in
 * ICR reads 0 when pressed).  Returns 0 if the controller has not been
 * mapped yet.
 */
int flipper_is_reset_button_pressed(void)
{
	void __iomem *io_base;
	u32 icr;

	if (!flipper_irq_host || !flipper_irq_host->host_data)
		return 0;

	io_base = flipper_irq_host->host_data;
	icr = in_be32(io_base + FLIPPER_ICR);
	return !(icr & FLIPPER_ICR_RSS);
}
| gpl-2.0 |
bsmitty83/Kernel_Tuna_AOSP | drivers/staging/octeon/ethernet-spi.c | 2830 | 10156 | /**********************************************************************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2007 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"
#include "cvmx-spi.h"
#include <asm/octeon/cvmx-npi-defs.h>
#include "cvmx-spxx-defs.h"
#include "cvmx-stxx-defs.h"
static int number_spi_ports;
static int need_retrain[2] = { 0, 0 };
/*
 * Report the receive-side (SRX) error causes latched in SPX_INT_REG for
 * one SPI interface.  The register is read and written back to
 * acknowledge the latched causes.  Messages are suppressed once the
 * interface is already flagged for retraining, and only causes that
 * are unmasked in SPX_INT_MSK are printed.
 */
static void cvm_oct_spi_report_spx_errors(int interface)
{
	union cvmx_spxx_int_reg spx_int_reg;

	spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(interface));
	cvmx_write_csr(CVMX_SPXX_INT_REG(interface), spx_int_reg.u64);
	if (need_retrain[interface])
		return;

	spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
	if (spx_int_reg.s.spf)
		pr_err("SPI%d: SRX Spi4 interface down\n", interface);
	if (spx_int_reg.s.calerr)
		pr_err("SPI%d: SRX Spi4 Calendar table parity error\n",
		       interface);
	if (spx_int_reg.s.syncerr)
		pr_err("SPI%d: SRX Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT]\n",
		       interface);
	if (spx_int_reg.s.diperr)
		pr_err("SPI%d: SRX Spi4 DIP4 error\n", interface);
	if (spx_int_reg.s.tpaovr)
		pr_err("SPI%d: SRX Selected port has hit TPA overflow\n",
		       interface);
	if (spx_int_reg.s.rsverr)
		pr_err("SPI%d: SRX Spi4 reserved control word detected\n",
		       interface);
	if (spx_int_reg.s.drwnng)
		pr_err("SPI%d: SRX Spi4 receive FIFO drowning/overflow\n",
		       interface);
	if (spx_int_reg.s.clserr)
		pr_err("SPI%d: SRX Spi4 packet closed on non-16B alignment without EOP\n",
		       interface);
	if (spx_int_reg.s.spiovr)
		pr_err("SPI%d: SRX Spi4 async FIFO overflow\n", interface);
	if (spx_int_reg.s.abnorm)
		pr_err("SPI%d: SRX Abnormal packet termination (ERR bit)\n",
		       interface);
	if (spx_int_reg.s.prtnxa)
		pr_err("SPI%d: SRX Port out of range\n", interface);
}

/*
 * Report the transmit-side (STX) error causes latched in STX_INT_REG
 * for one SPI interface.  Same acknowledge/suppress/mask rules as
 * cvm_oct_spi_report_spx_errors().
 */
static void cvm_oct_spi_report_stx_errors(int interface)
{
	union cvmx_stxx_int_reg stx_int_reg;

	stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(interface));
	cvmx_write_csr(CVMX_STXX_INT_REG(interface), stx_int_reg.u64);
	if (need_retrain[interface])
		return;

	stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
	if (stx_int_reg.s.syncerr)
		pr_err("SPI%d: STX Interface encountered a fatal error\n",
		       interface);
	if (stx_int_reg.s.frmerr)
		pr_err("SPI%d: STX FRMCNT has exceeded STX_DIP_CNT[MAXFRM]\n",
		       interface);
	if (stx_int_reg.s.unxfrm)
		pr_err("SPI%d: STX Unexpected framing sequence\n", interface);
	if (stx_int_reg.s.nosync)
		pr_err("SPI%d: STX ERRCNT has exceeded STX_DIP_CNT[MAXDIP]\n",
		       interface);
	if (stx_int_reg.s.diperr)
		pr_err("SPI%d: STX DIP2 error on the Spi4 Status channel\n",
		       interface);
	if (stx_int_reg.s.datovr)
		pr_err("SPI%d: STX Spi4 FIFO overflow error\n", interface);
	if (stx_int_reg.s.ovrbst)
		pr_err("SPI%d: STX Transmit packet burst too big\n", interface);
	if (stx_int_reg.s.calpar1)
		pr_err("SPI%d: STX Calendar Table Parity Error Bank1\n",
		       interface);
	if (stx_int_reg.s.calpar0)
		pr_err("SPI%d: STX Calendar Table Parity Error Bank0\n",
		       interface);
}

/*
 * RML (shared error) interrupt handler for the SPI interfaces.
 *
 * For each SPI block that raised an error (RSL bits 19/18 for SPI1/SPI0),
 * report and acknowledge the individual SPX/STX causes, mask further
 * error interrupts on that interface and flag it for retraining, which
 * cvm_oct_spi_poll() will later perform.
 *
 * Returns IRQ_HANDLED if either interface had a pending error,
 * IRQ_NONE otherwise (the IRQ line is shared).
 */
static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
{
	irqreturn_t return_status = IRQ_NONE;
	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
	int interface;

	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
	/* Keep the original handling order: SPI1 first, then SPI0. */
	for (interface = 1; interface >= 0; interface--) {
		int pending = interface ? rsl_int_blocks.s.spx1 :
					  rsl_int_blocks.s.spx0;

		if (!pending)
			continue;

		cvm_oct_spi_report_spx_errors(interface);
		cvm_oct_spi_report_stx_errors(interface);

		/* Mask all error interrupts until the retraining is done. */
		cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
		cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
		need_retrain[interface] = 1;
		return_status = IRQ_HANDLED;
	}
	return return_status;
}
static void cvm_oct_spi_enable_error_reporting(int interface)
{
union cvmx_spxx_int_msk spxx_int_msk;
union cvmx_stxx_int_msk stxx_int_msk;
spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
spxx_int_msk.s.calerr = 1;
spxx_int_msk.s.syncerr = 1;
spxx_int_msk.s.diperr = 1;
spxx_int_msk.s.tpaovr = 1;
spxx_int_msk.s.rsverr = 1;
spxx_int_msk.s.drwnng = 1;
spxx_int_msk.s.clserr = 1;
spxx_int_msk.s.spiovr = 1;
spxx_int_msk.s.abnorm = 1;
spxx_int_msk.s.prtnxa = 1;
cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
stxx_int_msk.s.frmerr = 1;
stxx_int_msk.s.unxfrm = 1;
stxx_int_msk.s.nosync = 1;
stxx_int_msk.s.diperr = 1;
stxx_int_msk.s.datovr = 1;
stxx_int_msk.s.ovrbst = 1;
stxx_int_msk.s.calpar1 = 1;
stxx_int_msk.s.calpar0 = 1;
cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
}
/*
 * Periodic poll callback for SPI ports: retrains an interface the RML
 * handler flagged as failed, and checks the SPI4000 link speed for at
 * most one port per invocation.
 */
static void cvm_oct_spi_poll(struct net_device *dev)
{
	static int spi4000_port;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int iface;

	for (iface = 0; iface < 2; iface++) {
		/*
		 * Only the first port of an interface (0 or 16) drives the
		 * retraining; clear the flag and re-enable error reporting
		 * once the restart succeeds.
		 */
		if (need_retrain[iface] && priv->port == iface * 16 &&
		    cvmx_spi_restart_interface(iface, CVMX_SPI_MODE_DUPLEX,
					       10) == 0) {
			need_retrain[iface] = 0;
			cvm_oct_spi_enable_error_reporting(iface);
		}

		/*
		 * The SPI4000 TWSI interface is very slow. In order
		 * not to bring the system to a crawl, we only poll a
		 * single port every second. This means negotiation
		 * speed changes take up to 10 seconds, but at least
		 * we don't waste absurd amounts of time waiting for
		 * TWSI.
		 */
		if (priv->port == spi4000_port) {
			/*
			 * This function does nothing if it is called on an
			 * interface without a SPI4000.
			 */
			cvmx_spi4000_check_speed(iface, priv->port);
			/*
			 * Normal ordering increments. By decrementing
			 * we only match once per iteration.
			 */
			if (--spi4000_port < 0)
				spi4000_port = 10;
		}
	}
}
/*
 * Per-netdev init for a SPI port.
 *
 * The first SPI port registers the shared RML error IRQ; every port
 * bumps the reference count.  The first port of each interface (0 or
 * 16) also enables error reporting and installs the poll callback.
 *
 * Returns 0 on success or a negative errno from request_irq() /
 * cvm_oct_common_init().
 *
 * Fix: the original dropped the return value of cvm_oct_common_init()
 * and always reported success; it is now propagated, and the IRQ
 * refcount taken above is released again on failure.
 */
int cvm_oct_spi_init(struct net_device *dev)
{
	int r;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (number_spi_ports == 0) {
		/* &number_spi_ports doubles as the shared-IRQ cookie. */
		r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
				IRQF_SHARED, "SPI", &number_spi_ports);
		if (r)
			return r;
	}
	number_spi_ports++;

	if ((priv->port == 0) || (priv->port == 16)) {
		cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
		priv->poll = cvm_oct_spi_poll;
	}

	r = cvm_oct_common_init(dev);
	if (r) {
		/* Undo the refcount (and the IRQ, if we were the first). */
		number_spi_ports--;
		if (number_spi_ports == 0)
			free_irq(OCTEON_IRQ_RML, &number_spi_ports);
		return r;
	}
	return 0;
}
/*
 * Per-netdev teardown for a SPI port.  When the last SPI port goes
 * away, mask all SPX/STX error interrupts and release the shared RML
 * IRQ taken in cvm_oct_spi_init().
 */
void cvm_oct_spi_uninit(struct net_device *dev)
{
	int iface;

	cvm_oct_common_uninit(dev);
	if (--number_spi_ports != 0)
		return;

	for (iface = 0; iface < 2; iface++) {
		cvmx_write_csr(CVMX_SPXX_INT_MSK(iface), 0);
		cvmx_write_csr(CVMX_STXX_INT_MSK(iface), 0);
	}
	free_irq(OCTEON_IRQ_RML, &number_spi_ports);
}
| gpl-2.0 |
wooshy1/android-tegra-nv-3.1 | drivers/scsi/ibmmca.c | 3342 | 87176 | /*
Low Level Linux Driver for the IBM Microchannel SCSI Subsystem for
Linux Kernel >= 2.4.0.
Copyright (c) 1995 Strom Systems, Inc. under the terms of the GNU
General Public License. Written by Martin Kolinek, December 1995.
Further development by: Chris Beauregard, Klaus Kudielka, Michael Lang
See the file Documentation/scsi/ibmmca.txt for a detailed description
of this driver, the commandline arguments and the history of its
development.
See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
updates, info and ADF-files for adapters supported by this driver.
Alan Cox <alan@lxorguk.ukuu.org.uk>
Updated for Linux 2.5.45 to use the new error handler, cleaned up the
lock macros and did a few unavoidable locking tweaks, plus one locking
fix in the irq and completion path.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mca.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
/* Common forward declarations for all Linux-versions: */
static int ibmmca_queuecommand (struct Scsi_Host *, struct scsi_cmnd *);
static int ibmmca_abort (Scsi_Cmnd *);
static int ibmmca_host_reset (Scsi_Cmnd *);
static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout);
/* current version of this driver-source: */
#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac"
/* driver configuration */
#define IM_MAX_HOSTS 8 /* maximum number of host adapters */
#define IM_RESET_DELAY 60 /* seconds allowed for a reset */
/* driver debugging - #undef all for normal operation */
/* if defined: count interrupts and ignore this special one: */
#undef IM_DEBUG_TIMEOUT //50
#define TIMEOUT_PUN 0
#define TIMEOUT_LUN 0
/* verbose interrupt: */
#undef IM_DEBUG_INT
/* verbose queuecommand: */
#undef IM_DEBUG_CMD
/* verbose queucommand for specific SCSI-device type: */
#undef IM_DEBUG_CMD_SPEC_DEV
/* verbose device probing */
#undef IM_DEBUG_PROBE
/* device type that shall be displayed on syslog (only during debugging): */
#define IM_DEBUG_CMD_DEVICE TYPE_TAPE
/* relative addresses of hardware registers on a subsystem */
#define IM_CMD_REG(h) ((h)->io_port) /*Command Interface, (4 bytes long) */
#define IM_ATTN_REG(h) ((h)->io_port+4) /*Attention (1 byte) */
#define IM_CTR_REG(h) ((h)->io_port+5) /*Basic Control (1 byte) */
#define IM_INTR_REG(h) ((h)->io_port+6) /*Interrupt Status (1 byte, r/o) */
#define IM_STAT_REG(h) ((h)->io_port+7) /*Basic Status (1 byte, read only) */
/* basic I/O-port of first adapter */
#define IM_IO_PORT 0x3540
/* maximum number of hosts that can be found */
#define IM_N_IO_PORT 8
/*requests going into the upper nibble of the Attention register */
/*note: the lower nibble specifies the device(0-14), or subsystem(15) */
#define IM_IMM_CMD 0x10 /*immediate command */
#define IM_SCB 0x30 /*Subsystem Control Block command */
#define IM_LONG_SCB 0x40 /*long Subsystem Control Block command */
#define IM_EOI 0xe0 /*end-of-interrupt request */
/*values for bits 7,1,0 of Basic Control reg. (bits 6-2 reserved) */
#define IM_HW_RESET 0x80 /*hardware reset */
#define IM_ENABLE_DMA 0x02 /*enable subsystem's busmaster DMA */
#define IM_ENABLE_INTR 0x01 /*enable interrupts to the system */
/*to interpret the upper nibble of Interrupt Status register */
/*note: the lower nibble specifies the device(0-14), or subsystem(15) */
#define IM_SCB_CMD_COMPLETED 0x10
#define IM_SCB_CMD_COMPLETED_WITH_RETRIES 0x50
#define IM_LOOP_SCATTER_BUFFER_FULL 0x60
#define IM_ADAPTER_HW_FAILURE 0x70
#define IM_IMMEDIATE_CMD_COMPLETED 0xa0
#define IM_CMD_COMPLETED_WITH_FAILURE 0xc0
#define IM_CMD_ERROR 0xe0
#define IM_SOFTWARE_SEQUENCING_ERROR 0xf0
/*to interpret bits 3-0 of Basic Status register (bits 7-4 reserved) */
#define IM_CMD_REG_FULL 0x08
#define IM_CMD_REG_EMPTY 0x04
#define IM_INTR_REQUEST 0x02
#define IM_BUSY 0x01
/*immediate commands (word written into low 2 bytes of command reg) */
#define IM_RESET_IMM_CMD 0x0400
#define IM_FEATURE_CTR_IMM_CMD 0x040c
#define IM_DMA_PACING_IMM_CMD 0x040d
#define IM_ASSIGN_IMM_CMD 0x040e
#define IM_ABORT_IMM_CMD 0x040f
#define IM_FORMAT_PREP_IMM_CMD 0x0417
/*SCB (Subsystem Control Block) structure */
/* Command packet handed to the subsystem; which union members are
   meaningful depends on the command word (block read/write vs. "other
   SCSI command").  Layout mirrors the hardware's expectations, so the
   field order must not change. */
struct im_scb {
unsigned short command; /*command word (read, etc.) */
unsigned short enable; /*enable word, modifies cmd */
union {
unsigned long log_blk_adr; /*block address on SCSI device */
unsigned char scsi_cmd_length; /*6,10,12, for other scsi cmd */
} u1;
unsigned long sys_buf_adr; /*physical system memory adr */
unsigned long sys_buf_length; /*size of sys mem buffer */
unsigned long tsb_adr; /*Termination Status Block adr */
unsigned long scb_chain_adr; /*optional SCB chain address */
union {
struct {
unsigned short count; /*block count, on SCSI device */
unsigned short length; /*block length, on SCSI device */
} blk;
unsigned char scsi_command[12]; /*other scsi command */
} u2;
};
/*structure scatter-gather element (for list of system memory areas) */
/* One element of the scatter/gather list: a system-memory area given
   by its address and length in bytes. */
struct im_sge {
void *address;
unsigned long byte_length;
};
/*structure returned by a get_pos_info command: */
/* Reply buffer of IM_GET_POS_INFO_CMD: POS register snapshot plus the
   adapter's capability data.  The layout must match the subsystem's
   reply format, so fields may not be reordered. */
struct im_pos_info {
unsigned short pos_id; /* adapter id */
unsigned char pos_3a; /* pos 3 (if pos 6 = 0) */
unsigned char pos_2; /* pos 2 */
unsigned char int_level; /* interrupt level IRQ 11 or 14 */
unsigned char pos_4a; /* pos 4 (if pos 6 = 0) */
unsigned short connector_size; /* MCA connector size: 16 or 32 Bit */
unsigned char num_luns; /* number of supported luns per device */
unsigned char num_puns; /* number of supported puns */
unsigned char pacing_factor; /* pacing factor */
unsigned char num_ldns; /* number of ldns available */
unsigned char eoi_off; /* time EOI and interrupt inactive */
unsigned char max_busy; /* time between reset and busy on */
unsigned short cache_stat; /* ldn cachestat. Bit=1 = not cached */
unsigned short retry_stat; /* retry status of ldns. Bit=1=disabled */
unsigned char pos_4b; /* pos 4 (if pos 6 = 1) */
unsigned char pos_3b; /* pos 3 (if pos 6 = 1) */
unsigned char pos_6; /* pos 6 */
unsigned char pos_5; /* pos 5 */
unsigned short max_overlap; /* maximum overlapping requests */
unsigned short num_bus; /* number of SCSI-busses */
};
/*values for SCB command word */
#define IM_NO_SYNCHRONOUS 0x0040 /*flag for any command */
#define IM_NO_DISCONNECT 0x0080 /*flag for any command */
#define IM_READ_DATA_CMD 0x1c01
#define IM_WRITE_DATA_CMD 0x1c02
#define IM_READ_VERIFY_CMD 0x1c03
#define IM_WRITE_VERIFY_CMD 0x1c04
#define IM_REQUEST_SENSE_CMD 0x1c08
#define IM_READ_CAPACITY_CMD 0x1c09
#define IM_DEVICE_INQUIRY_CMD 0x1c0b
#define IM_READ_LOGICAL_CMD 0x1c2a
#define IM_OTHER_SCSI_CMD_CMD 0x241f
/* unused, but supported, SCB commands */
#define IM_GET_COMMAND_COMPLETE_STATUS_CMD 0x1c07 /* command status */
#define IM_GET_POS_INFO_CMD 0x1c0a /* returns neat stuff */
#define IM_READ_PREFETCH_CMD 0x1c31 /* caching controller only */
#define IM_FOMAT_UNIT_CMD 0x1c16 /* format unit */
#define IM_REASSIGN_BLOCK_CMD 0x1c18 /* in case of error */
/*values to set bits in the enable word of SCB */
#define IM_READ_CONTROL 0x8000
#define IM_REPORT_TSB_ONLY_ON_ERROR 0x4000
#define IM_RETRY_ENABLE 0x2000
#define IM_POINTER_TO_LIST 0x1000
#define IM_SUPRESS_EXCEPTION_SHORT 0x0400
#define IM_BYPASS_BUFFER 0x0200
#define IM_CHAIN_ON_NO_ERROR 0x0001
/*TSB (Termination Status Block) structure */
/* Written back by the subsystem when a command terminates; it carries
   the completion/error status for the SCB whose tsb_adr points here. */
struct im_tsb {
unsigned short end_status;
unsigned short reserved1;
unsigned long residual_byte_count;
unsigned long sg_list_element_adr;
unsigned short status_length;
unsigned char dev_status;
unsigned char cmd_status;
unsigned char dev_error;
unsigned char cmd_error;
unsigned short reserved2;
unsigned short reserved3;
unsigned short low_of_last_scb_adr;
unsigned short high_of_last_scb_adr;
};
/*subsystem uses interrupt request level 14 */
#define IM_IRQ 14
/*SCSI-2 F/W may evade to interrupt 11 */
#define IM_IRQ_FW 11
/* Model 95 has an additional alphanumeric display, which can be used
to display SCSI-activities. 8595 models do not have any disk led, which
makes this feature quite useful.
The regular PS/2 disk led is turned on/off by bits 6,7 of system
control port. */
/* LED display-port (actually, last LED on display) */
#define MOD95_LED_PORT 0x108
/* system-control-register of PS/2s with diskindicator */
#define PS2_SYS_CTR 0x92
/* activity displaying methods */
#define LED_DISP 1
#define LED_ADISP 2
#define LED_ACTIVITY 4
/* failed intr */
#define CMD_FAIL 255
/* The SCSI-ID(!) of the accessed SCSI-device is shown on PS/2-95 machines' LED
displays. ldn is no longer displayed here, because the ldn mapping is now
done dynamically and the ldn <-> pun,lun maps can be looked-up at boottime
or during uptime in /proc/scsi/ibmmca/<host_no> in case of trouble,
interest, debugging or just for having fun. The left number gives the
host-adapter number and the right shows the accessed SCSI-ID. */
/* display_mode is set by the ibmmcascsi= command line arg */
static int display_mode = 0;
/* set default adapter timeout */
static unsigned int adapter_timeout = 45;
/* for probing on feature-command: */
static unsigned int global_command_error_excuse = 0;
/* global setting by command line for adapter_speed */
static int global_adapter_speed = 0; /* full speed by default */
/* Panel / LED on, do it right for F/W addressin, too. adisplay will
* just ignore ids>7, as the panel has only 7 digits available */
#define PS2_DISK_LED_ON(ad,id) { if (display_mode & LED_DISP) { if (id>9) \
outw((ad+48)|((id+55)<<8), MOD95_LED_PORT ); else \
outw((ad+48)|((id+48)<<8), MOD95_LED_PORT ); } else \
if (display_mode & LED_ADISP) { if (id<7) outb((char)(id+48),MOD95_LED_PORT+1+id); \
outb((char)(ad+48), MOD95_LED_PORT); } \
if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR); }
/* Panel / LED off */
/* bug fixed, Dec 15, 1997, where | was replaced by & here */
#define PS2_DISK_LED_OFF() { if (display_mode & LED_DISP) \
outw(0x2020, MOD95_LED_PORT ); else if (display_mode & LED_ADISP) { \
outl(0x20202020,MOD95_LED_PORT); outl(0x20202020,MOD95_LED_PORT+4); } \
if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); }
/* types of different supported hardware that goes to hostdata special */
#define IBM_SCSI2_FW 0
#define IBM_7568_WCACHE 1
#define IBM_EXP_UNIT 2
#define IBM_SCSI_WCACHE 3
#define IBM_SCSI 4
#define IBM_INTEGSCSI 5
/* other special flags for hostdata structure */
#define FORCED_DETECTION 100
#define INTEGRATED_SCSI 101
/* List of possible IBM-SCSI-adapters */
/* Zero-terminated list of MCA POS adapter IDs probed by this driver;
   presumably ordered to parallel ibmmca_description[] below — verify
   against the probe code before relying on the ordering. */
static short ibmmca_id_table[] = {
0x8efc,
0x8efd,
0x8ef8,
0x8eff,
0x8efe,
/* No entry for integrated SCSI, that's part of the register */
0
};
/* Human-readable adapter names, indexed by the 'special' adapter type
   codes (IBM_SCSI2_FW .. IBM_INTEGSCSI) defined above. */
static const char *ibmmca_description[] = {
"IBM SCSI-2 F/W Adapter", /* special = 0 */
"IBM 7568 Industrial Computer SCSI Adapter w/Cache", /* special = 1 */
"IBM Expansion Unit SCSI Controller", /* special = 2 */
"IBM SCSI Adapter w/Cache", /* special = 3 */
"IBM SCSI Adapter", /* special = 4 */
"IBM Integrated SCSI Controller", /* special = 5 */
};
/* Max number of logical devices (can be up from 0 to 14). 15 is the address
of the adapter itself. */
#define MAX_LOG_DEV 15
/*local data for a logical device */
/* Per-ldn bookkeeping: the in-flight SCB/TSB pair, the scatter/gather
   list and return buffer, plus the Scsi_Cmnd currently being processed
   for this logical device. */
struct logical_device {
struct im_scb scb; /* SCSI-subsystem-control-block structure */
struct im_tsb tsb; /* SCSI command complete status block structure */
struct im_sge sge[16]; /* scatter gather list structure */
unsigned char buf[256]; /* SCSI command return data buffer */
Scsi_Cmnd *cmd; /* SCSI-command that is currently in progress */
int device_type; /* type of the SCSI-device. See include/scsi/scsi.h
for interpretation of the possible values */
int block_length; /* blocksize of a particular logical SCSI-device */
int cache_flag; /* 1 if this is uncached, 0 if cache is present for ldn */
int retry_flag; /* 1 if adapter retry is disabled, 0 if enabled */
};
/* statistics of the driver during operations (for proc_info) */
/* Counters maintained while the driver runs and exported through the
   /proc interface (see ibmmca_proc_info). */
struct Driver_Statistics {
/* SCSI statistics on the adapter */
int ldn_access[MAX_LOG_DEV + 1]; /* total accesses on a ldn */
int ldn_read_access[MAX_LOG_DEV + 1]; /* total read-access on a ldn */
int ldn_write_access[MAX_LOG_DEV + 1]; /* total write-access on a ldn */
int ldn_inquiry_access[MAX_LOG_DEV + 1]; /* total inquiries on a ldn */
int ldn_modeselect_access[MAX_LOG_DEV + 1]; /* total mode selects on ldn */
int scbs; /* short SCBs queued */
int long_scbs; /* long SCBs queued */
int total_accesses; /* total accesses on all ldns */
int total_interrupts; /* total interrupts (should be
same as total_accesses) */
int total_errors; /* command completed with error */
/* dynamical assignment statistics */
int total_scsi_devices; /* number of physical pun,lun */
int dyn_flag; /* flag showing dynamical mode */
int dynamical_assignments; /* number of remappings of ldns */
int ldn_assignments[MAX_LOG_DEV + 1]; /* number of remappings of each
ldn */
};
/* data structure for each host adapter */
/* One instance lives in each Scsi_Host's hostdata area; the accessor
   macros that follow in this file (ld(), get_ldn(), IBM_DS(), ...)
   read and write these fields. */
struct ibmmca_hostdata {
/* array of logical devices: */
struct logical_device _ld[MAX_LOG_DEV + 1];
/* array to convert (pun, lun) into logical device number: */
unsigned char _get_ldn[16][8];
/*array that contains the information about the physical SCSI-devices
attached to this host adapter: */
unsigned char _get_scsi[16][8];
/* used only when checking logical devices: */
int _local_checking_phase_flag;
/* report received interrupt: */
int _got_interrupt;
/* report termination-status of SCSI-command: */
int _stat_result;
/* reset status (used only when doing reset): */
int _reset_status;
/* code of the last SCSI command (needed for panic info): */
int _last_scsi_command[MAX_LOG_DEV + 1];
/* identifier of the last SCSI-command type */
int _last_scsi_type[MAX_LOG_DEV + 1];
/* last blockcount */
int _last_scsi_blockcount[MAX_LOG_DEV + 1];
/* last locgical block address */
unsigned long _last_scsi_logical_block[MAX_LOG_DEV + 1];
/* Counter that points on the next reassignable ldn for dynamical
remapping. The default value is 7, that is the first reassignable
number in the list at boottime: */
int _next_ldn;
/* Statistics-structure for this IBM-SCSI-host: */
struct Driver_Statistics _IBM_DS;
/* This hostadapters pos-registers pos2 until pos6 */
unsigned int _pos[8];
/* assign a special variable, that contains dedicated info about the
adaptertype */
int _special;
/* connector size on the MCA bus */
int _connector_size;
/* synchronous SCSI transfer rate bitpattern */
int _adapter_speed;
};
/* macros to access host data structure */
#define subsystem_pun(h) ((h)->this_id)
#define subsystem_maxid(h) ((h)->max_id)
#define ld(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_ld)
#define get_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_ldn)
#define get_scsi(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_scsi)
#define local_checking_phase_flag(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_local_checking_phase_flag)
#define got_interrupt(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_got_interrupt)
#define stat_result(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_stat_result)
#define reset_status(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_reset_status)
#define last_scsi_command(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_command)
#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
#define last_scsi_blockcount(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_blockcount)
#define last_scsi_logical_block(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_logical_block)
#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
#define next_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_next_ldn)
#define IBM_DS(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_IBM_DS)
#define special(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_special)
#define subsystem_connector_size(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_connector_size)
#define adapter_speed(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_adapter_speed)
#define pos2(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[2])
#define pos3(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[3])
#define pos4(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[4])
#define pos5(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[5])
#define pos6(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[6])
/* Define a arbitrary number as subsystem-marker-type. This number is, as
described in the ANSI-SCSI-standard, not occupied by other device-types. */
#define TYPE_IBM_SCSI_ADAPTER 0x2F
/* Define 0xFF for no device type, because this type is not defined within
the ANSI-SCSI-standard, therefore, it can be used and should not cause any
harm. */
#define TYPE_NO_DEVICE 0xFF
/* define medium-changer. If this is not defined previously, e.g. Linux
2.0.x, define this type here. */
#ifndef TYPE_MEDIUM_CHANGER
#define TYPE_MEDIUM_CHANGER 0x08
#endif
/* define possible operations for the immediate_assign command */
#define SET_LDN 0
#define REMOVE_LDN 1
/* ldn which is used to probe the SCSI devices */
#define PROBE_LDN 0
/* reset status flag contents */
#define IM_RESET_NOT_IN_PROGRESS 0
#define IM_RESET_IN_PROGRESS 1
#define IM_RESET_FINISHED_OK 2
#define IM_RESET_FINISHED_FAIL 3
#define IM_RESET_NOT_IN_PROGRESS_NO_INT 4
#define IM_RESET_FINISHED_OK_NO_INT 5
/* define undefined SCSI-command */
#define NO_SCSI 0xffff
/*-----------------------------------------------------------------------*/
/* if this is nonzero, ibmmcascsi option has been passed to the kernel */
static int io_port[IM_MAX_HOSTS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
static int scsi_id[IM_MAX_HOSTS] = { 7, 7, 7, 7, 7, 7, 7, 7 };
/* fill module-parameters only, when this define is present.
(that is kernel version 2.1.x) */
#if defined(MODULE)
static char *boot_options = NULL;
module_param(boot_options, charp, 0);
module_param_array(io_port, int, NULL, 0);
module_param_array(scsi_id, int, NULL, 0);
MODULE_LICENSE("GPL");
#endif
/*counter of concurrent disk read/writes, to turn on/off disk led */
static int disk_rw_in_progress = 0;
static unsigned int pos[8]; /* whole pos register-line for diagnosis */
/* Taking into account the additions, made by ZP Gu.
* This selects now the preset value from the configfile and
* offers the 'normal' commandline option to be accepted */
#ifdef CONFIG_IBMMCA_SCSI_ORDER_STANDARD
static char ibm_ansi_order = 1;
#else
static char ibm_ansi_order = 0;
#endif
static void issue_cmd(struct Scsi_Host *, unsigned long, unsigned char);
static void internal_done(Scsi_Cmnd * cmd);
static void check_devices(struct Scsi_Host *, int);
static int immediate_assign(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, unsigned int);
static int immediate_feature(struct Scsi_Host *, unsigned int, unsigned int);
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
static int immediate_reset(struct Scsi_Host *, unsigned int);
#endif
static int device_inquiry(struct Scsi_Host *, int);
static int read_capacity(struct Scsi_Host *, int);
static int get_pos_info(struct Scsi_Host *);
static char *ti_p(int);
static char *ti_l(int);
static char *ibmrate(unsigned int, int);
static int probe_display(int);
static int probe_bus_mode(struct Scsi_Host *);
static int device_exists(struct Scsi_Host *, int, int *, int *);
static int option_setup(char *);
/* local functions needed for proc_info */
static int ldn_access_load(struct Scsi_Host *, int);
static int ldn_access_total_read_write(struct Scsi_Host *);
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
unsigned int intr_reg;
unsigned int cmd_result;
unsigned int ldn;
unsigned long flags;
Scsi_Cmnd *cmd;
int lastSCSI;
struct device *dev = dev_id;
struct Scsi_Host *shpnt = dev_get_drvdata(dev);
spin_lock_irqsave(shpnt->host_lock, flags);
if(!(inb(IM_STAT_REG(shpnt)) & IM_INTR_REQUEST)) {
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_NONE;
}
/* the reset-function already did all the job, even ints got
renabled on the subsystem, so just return */
if ((reset_status(shpnt) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(shpnt) == IM_RESET_FINISHED_OK_NO_INT)) {
reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/*must wait for attention reg not busy, then send EOI to subsystem */
while (1) {
if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
cpu_relax();
}
/*get command result and logical device */
intr_reg = (unsigned char) (inb(IM_INTR_REG(shpnt)));
cmd_result = intr_reg & 0xf0;
ldn = intr_reg & 0x0f;
/* get the last_scsi_command here */
lastSCSI = last_scsi_command(shpnt)[ldn];
outb(IM_EOI | ldn, IM_ATTN_REG(shpnt));
/*these should never happen (hw fails, or a local programming bug) */
if (!global_command_error_excuse) {
switch (cmd_result) {
/* Prevent from Ooopsing on error to show the real reason */
case IM_ADAPTER_HW_FAILURE:
case IM_SOFTWARE_SEQUENCING_ERROR:
case IM_CMD_ERROR:
printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n");
printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(shpnt)[ldn].scb.enable);
if (ld(shpnt)[ldn].cmd)
printk("%ld/%ld,", (long) (scsi_bufflen(ld(shpnt)[ldn].cmd)), (long) (ld(shpnt)[ldn].scb.sys_buf_length));
else
printk("none,");
if (ld(shpnt)[ldn].cmd)
printk("Blocksize=%d", ld(shpnt)[ldn].scb.u2.blk.length);
else
printk("Blocksize=none");
printk(", host=%p, ldn=0x%x\n", shpnt, ldn);
if (ld(shpnt)[ldn].cmd) {
printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(shpnt)[ldn], ld(shpnt)[ldn].scb.u2.blk.count);
printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(shpnt)[ldn], ld(shpnt)[ldn].scb.u1.log_blk_adr);
}
printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN");
/* if errors appear, enter this section to give detailed info */
printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n");
printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(shpnt)[ldn]);
printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(shpnt)));
printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(shpnt)));
printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg);
printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(shpnt)));
if ((last_scsi_type(shpnt)[ldn] == IM_SCB) || (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB)) {
printk(KERN_ERR " SCB-Command.................: %x\n", ld(shpnt)[ldn].scb.command);
printk(KERN_ERR " SCB-Enable..................: %x\n", ld(shpnt)[ldn].scb.enable);
printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(shpnt)[ldn].scb.u1.log_blk_adr);
printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(shpnt)[ldn].scb.sys_buf_adr);
printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(shpnt)[ldn].scb.sys_buf_length);
printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(shpnt)[ldn].scb.tsb_adr);
printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(shpnt)[ldn].scb.scb_chain_adr);
printk(KERN_ERR " SCB-block count.............: %x\n", ld(shpnt)[ldn].scb.u2.blk.count);
printk(KERN_ERR " SCB-block length............: %x\n", ld(shpnt)[ldn].scb.u2.blk.length);
}
printk(KERN_ERR " Send this report to the maintainer.\n");
panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result);
break;
}
} else {
/* The command error handling is made silent, but we tell the
* calling function, that there is a reported error from the
* adapter. */
switch (cmd_result) {
case IM_ADAPTER_HW_FAILURE:
case IM_SOFTWARE_SEQUENCING_ERROR:
case IM_CMD_ERROR:
global_command_error_excuse = CMD_FAIL;
break;
default:
global_command_error_excuse = 0;
break;
}
}
/* if no panic appeared, increase the interrupt-counter */
IBM_DS(shpnt).total_interrupts++;
/*only for local checking phase */
if (local_checking_phase_flag(shpnt)) {
stat_result(shpnt) = cmd_result;
got_interrupt(shpnt) = 1;
reset_status(shpnt) = IM_RESET_FINISHED_OK;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/* handling of commands coming from upper level of scsi driver */
if (last_scsi_type(shpnt)[ldn] == IM_IMM_CMD) {
/* verify ldn, and may handle rare reset immediate command */
if ((reset_status(shpnt) == IM_RESET_IN_PROGRESS) && (last_scsi_command(shpnt)[ldn] == IM_RESET_IMM_CMD)) {
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
} else {
/*reset disk led counter, turn off disk led */
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
reset_status(shpnt) = IM_RESET_FINISHED_OK;
}
stat_result(shpnt) = cmd_result;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
last_scsi_type(shpnt)[ldn] = 0;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
} else if (last_scsi_command(shpnt)[ldn] == IM_ABORT_IMM_CMD) {
/* react on SCSI abort command */
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n");
#endif
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
cmd = ld(shpnt)[ldn].cmd;
ld(shpnt)[ldn].cmd = NULL;
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE)
cmd->result = DID_NO_CONNECT << 16;
else
cmd->result = DID_ABORT << 16;
stat_result(shpnt) = cmd_result;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
last_scsi_type(shpnt)[ldn] = 0;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd); /* should be the internal_done */
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
} else {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
reset_status(shpnt) = IM_RESET_FINISHED_OK;
stat_result(shpnt) = cmd_result;
last_scsi_command(shpnt)[ldn] = NO_SCSI;
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
}
last_scsi_command(shpnt)[ldn] = NO_SCSI;
last_scsi_type(shpnt)[ldn] = 0;
cmd = ld(shpnt)[ldn].cmd;
ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_TIMEOUT
if (cmd) {
if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) {
spin_unlock_irqsave(shpnt->host_lock, flags);
printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun);
return IRQ_HANDLED;
}
}
#endif
/*if no command structure, just return, else clear cmd */
if (!cmd)
{
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
#ifdef IM_DEBUG_INT
printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(shpnt)[ldn].tsb.dev_status, ld(shpnt)[ldn].tsb.cmd_status, ld(shpnt)[ldn].tsb.dev_error, ld(shpnt)[ldn].tsb.cmd_error);
#endif
/*if this is end of media read/write, may turn off PS/2 disk led */
if ((ld(shpnt)[ldn].device_type != TYPE_NO_LUN) && (ld(shpnt)[ldn].device_type != TYPE_NO_DEVICE)) {
/* only access this, if there was a valid device addressed */
if (--disk_rw_in_progress == 0)
PS2_DISK_LED_OFF();
}
/* IBM describes the status-mask to be 0x1e, but this is not conform
* with SCSI-definition, I suppose, the reason for it is that IBM
* adapters do not support CMD_TERMINATED, TASK_SET_FULL and
* ACA_ACTIVE as returning statusbyte information. (ML) */
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
cmd->result = (unsigned char) (ld(shpnt)[ldn].tsb.dev_status & 0x1e);
IBM_DS(shpnt).total_errors++;
} else
cmd->result = 0;
/* write device status into cmd->result, and call done function */
if (lastSCSI == NO_SCSI) { /* unexpected interrupt :-( */
cmd->result |= DID_BAD_INTR << 16;
printk("IBM MCA SCSI: WARNING - Interrupt from non-pending SCSI-command!\n");
} else /* things went right :-) */
cmd->result |= DID_OK << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/*
 * Hand one command word to the subsystem.
 * Busy-waits until the attention register is no longer busy, then writes
 * the command and attention registers back-to-back.
 *
 * Locking: the host lock is acquired inside the wait loop and is STILL
 * HELD when the loop breaks; it is released only after both register
 * writes, so the command/attention write pair cannot be interleaved
 * with the interrupt handler.  Do not "fix" the apparent imbalance.
 */
static void issue_cmd(struct Scsi_Host *shpnt, unsigned long cmd_reg,
		      unsigned char attn_reg)
{
	unsigned long flags;
	/* must wait for attention reg not busy */
	while (1) {
		spin_lock_irqsave(shpnt->host_lock, flags);
		if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
			break;	/* leave loop with the lock held on purpose */
		spin_unlock_irqrestore(shpnt->host_lock, flags);
	}
	/* write registers and enable system interrupts */
	outl(cmd_reg, IM_CMD_REG(shpnt));
	outb(attn_reg, IM_ATTN_REG(shpnt));
	spin_unlock_irqrestore(shpnt->host_lock, flags);
}
/*
 * Completion callback for internally issued (polled) commands: simply
 * bumps the status counter that the issuing busy-wait loop watches.
 */
static void internal_done(Scsi_Cmnd * cmd)
{
	++cmd->SCp.Status;
}
/* SCSI-SCB-command for device_inquiry */
/*
 * Issue a SCSI INQUIRY SCB to the given ldn and busy-wait for completion.
 * On success the inquiry data is left in ld(shpnt)[ldn].buf.
 *
 * Returns 1 if the command completed (possibly with retries),
 * 0 if all three attempts failed ("no device at this ldn").
 */
static int device_inquiry(struct Scsi_Host *shpnt, int ldn)
{
	int retr;
	struct im_scb *scb;
	struct im_tsb *tsb;
	unsigned char *buf;
	scb = &(ld(shpnt)[ldn].scb);
	tsb = &(ld(shpnt)[ldn].tsb);
	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
	ld(shpnt)[ldn].tsb.dev_status = 0;	/* prepare statusblock */
	for (retr = 0; retr < 3; retr++) {
		/* fill scb with inquiry command */
		scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
		scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
		last_scsi_command(shpnt)[ldn] = IM_DEVICE_INQUIRY_CMD;
		last_scsi_type(shpnt)[ldn] = IM_SCB;
		scb->sys_buf_adr = isa_virt_to_bus(buf);
		scb->sys_buf_length = 255;	/* maximum bufferlength gives max info */
		scb->tsb_adr = isa_virt_to_bus(tsb);
		/* issue scb to passed ldn, and busy wait for interrupt */
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
		/* FIXME: no timeout — a lost interrupt hangs here forever */
		while (!got_interrupt(shpnt))
			barrier();
		/* if command successful, return success */
		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
			return 1;
	}
	/* All three retries failed: no device at this ldn.  (The former
	 * trailing "if (retr >= 3) return 0; else return 1;" was dead
	 * code — the loop can only fall through with retr == 3.) */
	return 0;
}
/*
 * Issue a SCSI READ CAPACITY SCB to the given ldn and busy-wait for
 * completion.  The 8-byte capacity data is left in ld(shpnt)[ldn].buf.
 *
 * Returns 1 if the command completed (possibly with retries),
 * 0 if all three attempts failed.
 */
static int read_capacity(struct Scsi_Host *shpnt, int ldn)
{
	int retr;
	struct im_scb *scb;
	struct im_tsb *tsb;
	unsigned char *buf;
	scb = &(ld(shpnt)[ldn].scb);
	tsb = &(ld(shpnt)[ldn].tsb);
	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
	ld(shpnt)[ldn].tsb.dev_status = 0;
	for (retr = 0; retr < 3; retr++) {
		/*fill scb with read capacity command */
		scb->command = IM_READ_CAPACITY_CMD;
		scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
		last_scsi_command(shpnt)[ldn] = IM_READ_CAPACITY_CMD;
		last_scsi_type(shpnt)[ldn] = IM_SCB;
		scb->sys_buf_adr = isa_virt_to_bus(buf);
		scb->sys_buf_length = 8;	/* READ CAPACITY returns exactly 8 bytes */
		scb->tsb_adr = isa_virt_to_bus(tsb);
		/*issue scb to passed ldn, and busy wait for interrupt */
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
		/* FIXME: no timeout — a lost interrupt hangs here forever */
		while (!got_interrupt(shpnt))
			barrier();
		/*if got capacity, return success */
		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
			return 1;
	}
	/* All three retries failed.  (The former trailing
	 * "if (retr >= 3) ... else return 1;" was dead code — the loop can
	 * only fall through with retr == 3.) */
	return 0;
}
/*
 * Fetch the adapter POS information via a GET-POS-INFO SCB on ldn 15
 * (MAX_LOG_DEV) and busy-wait for completion.  The reply is left in
 * ld(shpnt)[MAX_LOG_DEV].buf; F/W adapters return up to 256 bytes,
 * everything else exactly 18.
 *
 * Returns 1 if the command completed (possibly with retries),
 * 0 if all three attempts failed.
 */
static int get_pos_info(struct Scsi_Host *shpnt)
{
	int retr;
	struct im_scb *scb;
	struct im_tsb *tsb;
	unsigned char *buf;
	scb = &(ld(shpnt)[MAX_LOG_DEV].scb);
	tsb = &(ld(shpnt)[MAX_LOG_DEV].tsb);
	buf = (unsigned char *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
	ld(shpnt)[MAX_LOG_DEV].tsb.dev_status = 0;
	for (retr = 0; retr < 3; retr++) {
		/*fill scb with get_pos_info command */
		scb->command = IM_GET_POS_INFO_CMD;
		scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_SCB;
		scb->sys_buf_adr = isa_virt_to_bus(buf);
		if (special(shpnt) == IBM_SCSI2_FW)
			scb->sys_buf_length = 256;	/* get all info from F/W adapter */
		else
			scb->sys_buf_length = 18;	/* get exactly 18 bytes for other SCSI */
		scb->tsb_adr = isa_virt_to_bus(tsb);
		/*issue scb to ldn=15, and busy wait for interrupt */
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
		/* FIXME: timeout */
		while (!got_interrupt(shpnt))
			barrier();
		/*if got POS-stuff, return success */
		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
			return 1;
	}
	/* All three retries failed.  (The former trailing
	 * "if (retr >= 3) ... else return 1;" was dead code — the loop can
	 * only fall through with retr == 3.) */
	return 0;
}
/* SCSI-immediate-command for assign. This functions maps/unmaps specific
ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the
subsystem and for dynamical remapping od ldns. */
/*
 * SCSI immediate ASSIGN command: map (operation == SET_LDN) or unmap
 * (REMOVE_LDN) a logical device number onto a physical (pun, lun).
 * The command word layout differs between the SCSI-2 F/W adapter
 * (4-bit wide PUN) and all other mutation levels.
 *
 * Returns 1 on success, 0 if all three attempts failed.
 */
static int immediate_assign(struct Scsi_Host *shpnt, unsigned int pun,
			    unsigned int lun, unsigned int ldn,
			    unsigned int operation)
{
	int retr;
	unsigned long imm_cmd;
	for (retr = 0; retr < 3; retr++) {
		/* select mutation level of the SCSI-adapter */
		switch (special(shpnt)) {
		case IBM_SCSI2_FW:
			imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD);
			imm_cmd |= (unsigned long) ((lun & 7) << 24);
			imm_cmd |= (unsigned long) ((operation & 1) << 23);
			/* F/W: PUN high bit lives at bit 27 */
			imm_cmd |= (unsigned long) ((pun & 7) << 20) | ((pun & 8) << 24);
			imm_cmd |= (unsigned long) ((ldn & 15) << 16);
			break;
		default:
			imm_cmd = inl(IM_CMD_REG(shpnt));
			imm_cmd &= (unsigned long) (0xF8000000);	/* keep reserved bits */
			imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD);
			imm_cmd |= (unsigned long) ((lun & 7) << 24);
			imm_cmd |= (unsigned long) ((operation & 1) << 23);
			imm_cmd |= (unsigned long) ((pun & 7) << 20);
			imm_cmd |= (unsigned long) ((ldn & 15) << 16);
			break;
		}
		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
		got_interrupt(shpnt) = 0;
		issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
		/* FIXME: no timeout — a lost interrupt hangs here forever */
		while (!got_interrupt(shpnt))
			barrier();
		/*if command successful, return success */
		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
			return 1;
	}
	/* All three retries failed.  (The former trailing
	 * "if (retr >= 3) ... else return 1;" was dead code — the loop can
	 * only fall through with retr == 3.) */
	return 0;
}
/*
 * SCSI immediate FEATURE-CONTROL command: program the synchronous
 * transfer speed (0..7 bit pattern, see ibmrate()) and the adapter
 * timeout.  Command errors are deliberately tolerated here (via
 * global_command_error_excuse) because speed probing works by trying
 * rates until one is accepted.
 *
 * Returns 1 on success, 2 if the adapter rejected the setting
 * (caller should retry with a slower rate), 0 if all three attempts
 * failed outright.
 */
static int immediate_feature(struct Scsi_Host *shpnt, unsigned int speed, unsigned int timeout)
{
	int retr;
	unsigned long imm_cmd;
	for (retr = 0; retr < 3; retr++) {
		/* select mutation level of the SCSI-adapter */
		imm_cmd = IM_FEATURE_CTR_IMM_CMD;
		imm_cmd |= (unsigned long) ((speed & 0x7) << 29);
		imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16);
		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
		got_interrupt(shpnt) = 0;
		/* we need to run into command errors in order to probe for the
		 * right speed! */
		global_command_error_excuse = 1;
		issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
		/* FIXME: timeout */
		while (!got_interrupt(shpnt))
			barrier();
		if (global_command_error_excuse == CMD_FAIL) {
			global_command_error_excuse = 0;
			return 2;	/* speed rejected — caller tries next rate */
		} else
			global_command_error_excuse = 0;
		/*if command successful, return success */
		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
			return 1;
	}
	/* All three retries failed.  (The former trailing
	 * "if (retr >= 3) ... else return 1;" was dead code — the loop can
	 * only fall through with retr == 3.) */
	return 0;
}
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
/*
 * SCSI immediate RESET command for one ldn.  Polls reset_status()
 * (updated by the interrupt handler) for up to IM_RESET_DELAY seconds.
 *
 * Returns 1 when the reset finished or timed out (timeout is only
 * complained about, historically treated as "finish"), 0 if all three
 * attempts failed to complete the immediate command.
 */
static int immediate_reset(struct Scsi_Host *shpnt, unsigned int ldn)
{
	int retries;
	int ticks;
	unsigned long imm_command;
	for (retries = 0; retries < 3; retries++) {
		imm_command = inl(IM_CMD_REG(shpnt));
		imm_command &= (unsigned long) (0xFFFF0000);	/* keep reserved bits */
		imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
		last_scsi_command(shpnt)[ldn] = IM_RESET_IMM_CMD;
		last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
		got_interrupt(shpnt) = 0;
		reset_status(shpnt) = IM_RESET_IN_PROGRESS;
		issue_cmd(shpnt, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
		ticks = IM_RESET_DELAY * HZ;
		while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks) {
			udelay((1 + 999 / HZ) * 1000);	/* ~1 jiffy of busy delay */
			barrier();
		}
		/* if reset did not complete, just complain */
		if (!ticks) {
			printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
			reset_status(shpnt) = IM_RESET_FINISHED_OK;
			/* did not work, finish */
			return 1;
		}
		/*if command successful, return success */
		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
			return 1;
	}
	/* All three retries failed.  (The former trailing
	 * "if (retries >= 3) ... else return 1;" was dead code — the loop
	 * can only fall through with retries == 3.) */
	return 0;
}
#endif
/* type-interpreter for physical device numbers */
/*
 * Type interpreter for physical devices: map a SCSI peripheral type
 * code to a single-character display string for the mapping table.
 * Unknown codes (including TYPE_NO_DEVICE) come back as "-".
 */
static char *ti_p(int dev)
{
	switch (dev) {
	case TYPE_IBM_SCSI_ADAPTER:
		return "A";
	case TYPE_DISK:
		return "D";
	case TYPE_TAPE:
		return "T";
	case TYPE_PROCESSOR:
		return "P";
	case TYPE_WORM:
		return "W";
	case TYPE_ROM:
		return "R";
	case TYPE_SCANNER:
		return "S";
	case TYPE_MOD:
		return "M";
	case TYPE_MEDIUM_CHANGER:
		return "C";
	case TYPE_NO_LUN:
		return "+";	/* show NO_LUN */
	default:
		return "-";	/* TYPE_NO_DEVICE and others */
	}
}
/* interpreter for logical device numbers (ldn) */
/*
 * Interpreter for logical device numbers: render an ldn (0..MAX_LOG_DEV)
 * as a single hex digit, anything larger as "-".
 * NOTE: returns a pointer to a static buffer — not reentrant; callers
 * in this driver only use it from the single-threaded probe path.
 */
static char *ti_l(int val)
{
	static char answer[2];

	answer[0] = (val <= MAX_LOG_DEV) ? "0123456789abcdef"[val] : '-';
	answer[1] = '\0';
	return answer;
}
/* transfers bitpattern of the feature command to values in MHz */
/*
 * Translate the FEATURE-CONTROL speed bit pattern (0..7) into a
 * transfer rate in MHz, as a display string.  'i' selects the adapter
 * family: non-zero for the integrated/classic subsystems, zero for the
 * SCSI-2 F/W adapter (which clocks twice as fast per step).
 * Out-of-range patterns yield "---".
 */
static char *ibmrate(unsigned int speed, int i)
{
	static char *const rate_fw[] = {
		"10.00", "8.00", "6.66", "5.00", "4.00", "3.10", "2.50", "2.00"
	};
	static char *const rate_classic[] = {
		"5.00", "4.00", "3.33", "2.86", "2.50", "2.22", "2.00", "1.82"
	};

	if (speed > 7)
		return "---";
	return i ? rate_classic[speed] : rate_fw[speed];
}
/*
 * Drive the 8-character LED panel of model XX95 machines during probing
 * (useful when booting without a monitor).  what == 0 blanks the panel;
 * otherwise "SCSIini" plus a spinning rotor character is shown.
 * No-op unless the "display" option (LED_DISP) was given.
 */
static int probe_display(int what)
{
	static int rotator = 0;
	const char rotor[] = "|/-\\";
	static const char tag[] = "SCSIini";
	int k;

	if (!(display_mode & LED_DISP))
		return 0;
	if (!what) {
		/* blank all eight positions with spaces */
		outl(0x20202020, MOD95_LED_PORT);
		outl(0x20202020, MOD95_LED_PORT + 4);
	} else {
		/* tag[0] goes to the leftmost position (port offset 7) */
		for (k = 0; k < 7; k++)
			outb(tag[k], MOD95_LED_PORT + 7 - k);
		outb((char) (rotor[rotator]), MOD95_LED_PORT);
		rotator = (rotator + 1) & 3;
	}
	return 0;
}
/*
 * Read the adapter POS information and derive per-adapter settings:
 * connector size, per-ldn cache/retry enable flags, and — meaningful
 * only on the SCSI-2 F/W adapter — whether the two buses run in
 * separate (wide-addressing) mode.  Returns that bus-mode bit.
 */
static int probe_bus_mode(struct Scsi_Host *shpnt)
{
	struct im_pos_info *info;
	int num_bus = 0;
	int i;

	info = (struct im_pos_info *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
	if (get_pos_info(shpnt)) {
		subsystem_connector_size(shpnt) = (info->connector_size & 0xf000) ? 16 : 32;
		num_bus |= (info->pos_4b & 8) >> 3;
		for (i = 0; i <= MAX_LOG_DEV; i++) {
			/* cache bits are only valid on write-cache models */
			if ((special(shpnt) == IBM_SCSI_WCACHE) || (special(shpnt) == IBM_7568_WCACHE)) {
				if (!((info->cache_stat >> i) & 1))
					ld(shpnt)[i].cache_flag = 0;
			}
			if (!((info->retry_stat >> i) & 1))
				ld(shpnt)[i].retry_flag = 0;
		}
#ifdef IM_DEBUG_PROBE
		printk("IBM MCA SCSI: SCSI-Cache bits: ");
		for (i = 0; i <= MAX_LOG_DEV; i++)
			printk("%d", ld(shpnt)[i].cache_flag);
		printk("\nIBM MCA SCSI: SCSI-Retry bits: ");
		for (i = 0; i <= MAX_LOG_DEV; i++)
			printk("%d", ld(shpnt)[i].retry_flag);
		printk("\n");
#endif
	}
	return num_bus;
}
/* probing scsi devices */
/*
 * Probe and map all SCSI devices behind one subsystem.
 *
 * Sequence (order is hardware-mandated, do not reorder):
 *   STEP 1: negotiate the fastest working synchronous transfer rate.
 *   STEP 2: probe every (pun, lun) through a scratch ldn (PROBE_LDN).
 *   STEP 3: assign real ldns to the devices that answered.
 *   STEP 4: park the remaining ldns on non-existing (pun, lun) pairs so
 *           the adapter cannot remap them autonomously.
 * Finally the per-adapter statistics block is reset.
 */
static void check_devices(struct Scsi_Host *shpnt, int adaptertype)
{
	int id, lun, ldn, ticks;
	int count_devices;	/* local counter for connected device */
	int max_pun;
	int num_bus;
	int speedrun;		/* local adapter_speed check variable */
	/* assign default values to certain variables */
	ticks = 0;
	count_devices = 0;
	IBM_DS(shpnt).dyn_flag = 0;	/* normally no need for dynamical ldn management */
	IBM_DS(shpnt).total_errors = 0;	/* set errorcounter to 0 */
	next_ldn(shpnt) = 7;	/* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
	/* initialize the very important driver-informational arrays/structs */
	memset(ld(shpnt), 0, sizeof(ld(shpnt)));
	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
		last_scsi_command(shpnt)[ldn] = NO_SCSI;	/* emptify last SCSI-command storage */
		last_scsi_type(shpnt)[ldn] = 0;
		ld(shpnt)[ldn].cache_flag = 1;
		ld(shpnt)[ldn].retry_flag = 1;
	}
	memset(get_ldn(shpnt), TYPE_NO_DEVICE, sizeof(get_ldn(shpnt)));	/* this is essential ! */
	memset(get_scsi(shpnt), TYPE_NO_DEVICE, sizeof(get_scsi(shpnt)));	/* this is essential ! */
	for (lun = 0; lun < 8; lun++) {
		/* mark the adapter at its pun on all luns */
		get_scsi(shpnt)[subsystem_pun(shpnt)][lun] = TYPE_IBM_SCSI_ADAPTER;
		get_ldn(shpnt)[subsystem_pun(shpnt)][lun] = MAX_LOG_DEV;	/* make sure, the subsystem
									   ldn is active for all
									   luns. */
	}
	probe_display(0);	/* Supercool display usage during SCSI-probing. */
	/* This makes sense, when booting without any */
	/* monitor connected on model XX95. */
	/* STEP 1: */
	/* try the configured speed first; immediate_feature() returns 2 when
	 * the adapter rejects a rate, so fall back one step at a time. */
	adapter_speed(shpnt) = global_adapter_speed;
	speedrun = adapter_speed(shpnt);
	while (immediate_feature(shpnt, speedrun, adapter_timeout) == 2) {
		probe_display(1);
		if (speedrun == 7)
			panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n");
		speedrun++;
		if (speedrun > 7)
			speedrun = 7;
	}
	adapter_speed(shpnt) = speedrun;
	/* Get detailed information about the current adapter, necessary for
	 * device operations: */
	num_bus = probe_bus_mode(shpnt);
	/* num_bus contains only valid data for the F/W adapter! */
	if (adaptertype == IBM_SCSI2_FW) {	/* F/W SCSI adapter: */
		/* F/W adapter PUN-space extension evaluation: */
		if (num_bus) {
			printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n");
			subsystem_maxid(shpnt) = 16;
		} else {
			printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n");
			subsystem_maxid(shpnt) = 8;
		}
		printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype));
	} else			/* all other IBM SCSI adapters: */
		printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype));
	/* assign correct PUN device space */
	max_pun = subsystem_maxid(shpnt);
#ifdef IM_DEBUG_PROBE
	/* NOTE(review): shpnt is a pointer printed with %d — format-specifier
	 * mismatch in this debug-only path; confirm and switch to %p. */
	printk("IBM MCA SCSI: Current SCSI-host index: %d\n", shpnt);
	printk("IBM MCA SCSI: Removing default logical SCSI-device mapping.");
#else
	printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New");
#endif
	for (ldn = 0; ldn < MAX_LOG_DEV; ldn++) {
		probe_display(1);
#ifdef IM_DEBUG_PROBE
		printk(".");
#endif
		immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);	/* remove ldn (wherever) */
	}
	lun = 0;		/* default lun is 0 */
#ifndef IM_DEBUG_PROBE
	printk("cleared,");
#endif
	/* STEP 2: */
#ifdef IM_DEBUG_PROBE
	printk("\nIBM MCA SCSI: Scanning SCSI-devices.");
#endif
	for (id = 0; id < max_pun; id++)
#ifdef CONFIG_SCSI_MULTI_LUN
		for (lun = 0; lun < 8; lun++)
#endif
		{
			probe_display(1);
#ifdef IM_DEBUG_PROBE
			printk(".");
#endif
			if (id != subsystem_pun(shpnt)) {
				/* if pun is not the adapter: */
				/* set ldn=0 to pun,lun */
				immediate_assign(shpnt, id, lun, PROBE_LDN, SET_LDN);
				if (device_inquiry(shpnt, PROBE_LDN)) {	/* probe device */
					get_scsi(shpnt)[id][lun] = (unsigned char) (ld(shpnt)[PROBE_LDN].buf[0]);
					/* entry, even for NO_LUN */
					if (ld(shpnt)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
						count_devices++;	/* a existing device is found */
				}
				/* remove ldn */
				immediate_assign(shpnt, id, lun, PROBE_LDN, REMOVE_LDN);
			}
		}
#ifndef IM_DEBUG_PROBE
	printk("scanned,");
#endif
	/* STEP 3: */
#ifdef IM_DEBUG_PROBE
	printk("\nIBM MCA SCSI: Mapping SCSI-devices.");
#endif
	ldn = 0;
	lun = 0;
#ifdef CONFIG_SCSI_MULTI_LUN
	for (lun = 0; lun < 8 && ldn < MAX_LOG_DEV; lun++)
#endif
		for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
			probe_display(1);
#ifdef IM_DEBUG_PROBE
			printk(".");
#endif
			if (id != subsystem_pun(shpnt)) {
				if (get_scsi(shpnt)[id][lun] != TYPE_NO_LUN && get_scsi(shpnt)[id][lun] != TYPE_NO_DEVICE) {
					/* Only map if accepted type. Always enter for
					   lun == 0 to get no gaps into ldn-mapping for ldn<7. */
					immediate_assign(shpnt, id, lun, ldn, SET_LDN);
					get_ldn(shpnt)[id][lun] = ldn;	/* map ldn */
					if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
						printk("resetting device at ldn=%x ... ", ldn);
						immediate_reset(shpnt, ldn);
#endif
						ldn++;
					} else {
						/* device vanished, probably because we don't know how to
						 * handle it or because it has problems */
						if (lun > 0) {
							/* remove mapping */
							get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
							immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);
						} else
							ldn++;
					}
				} else if (lun == 0) {
					/* map lun == 0, even if no device exists */
					immediate_assign(shpnt, id, lun, ldn, SET_LDN);
					get_ldn(shpnt)[id][lun] = ldn;	/* map ldn */
					ldn++;
				}
			}
		}
	/* STEP 4: */
	/* map remaining ldns to non-existing devices */
	for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++)
		for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
			if (get_scsi(shpnt)[id][lun] == TYPE_NO_LUN || get_scsi(shpnt)[id][lun] == TYPE_NO_DEVICE) {
				probe_display(1);
				/* Map remaining ldns only to NON-existing pun,lun
				   combinations to make sure an inquiry will fail.
				   For MULTI_LUN, it is needed to avoid adapter autonome
				   SCSI-remapping. */
				immediate_assign(shpnt, id, lun, ldn, SET_LDN);
				get_ldn(shpnt)[id][lun] = ldn;
				ldn++;
			}
		}
#ifndef IM_DEBUG_PROBE
	printk("mapped.");
#endif
	printk("\n");
#ifdef IM_DEBUG_PROBE
	if (ibm_ansi_order)
		printk("IBM MCA SCSI: Device order: IBM/ANSI (pun=7 is first).\n");
	else
		printk("IBM MCA SCSI: Device order: New Industry Standard (pun=0 is first).\n");
#endif
#ifdef IM_DEBUG_PROBE
	/* Show the physical and logical mapping during boot. */
	printk("IBM MCA SCSI: Determined SCSI-device-mapping:\n");
	printk("    Physical SCSI-Device Map               Logical SCSI-Device Map\n");
	printk("ID\\LUN  0  1  2  3  4  5  6  7       ID\\LUN  0  1  2  3  4  5  6  7\n");
	for (id = 0; id < max_pun; id++) {
		printk("%2d     ", id);
		for (lun = 0; lun < 8; lun++)
			printk("%2s ", ti_p(get_scsi(shpnt)[id][lun]));
		printk("      %2d     ", id);
		for (lun = 0; lun < 8; lun++)
			printk("%2s ", ti_l(get_ldn(shpnt)[id][lun]));
		printk("\n");
	}
#endif
	/* assign total number of found SCSI-devices to the statistics struct */
	IBM_DS(shpnt).total_scsi_devices = count_devices;
	/* decide for output in /proc-filesystem, if the configuration of
	   SCSI-devices makes dynamical reassignment of devices necessary */
	if (count_devices >= MAX_LOG_DEV)
		IBM_DS(shpnt).dyn_flag = 1;	/* dynamical assignment is necessary */
	else
		IBM_DS(shpnt).dyn_flag = 0;	/* dynamical assignment is not necessary */
	/* If no SCSI-devices are assigned, return 1 in order to cause message. */
	if (ldn == 0)
		printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n");
	/* reset the counters for statistics on the current adapter */
	IBM_DS(shpnt).scbs = 0;
	IBM_DS(shpnt).long_scbs = 0;
	IBM_DS(shpnt).total_accesses = 0;
	IBM_DS(shpnt).total_interrupts = 0;
	IBM_DS(shpnt).dynamical_assignments = 0;
	memset(IBM_DS(shpnt).ldn_access, 0x0, sizeof(IBM_DS(shpnt).ldn_access));
	memset(IBM_DS(shpnt).ldn_read_access, 0x0, sizeof(IBM_DS(shpnt).ldn_read_access));
	memset(IBM_DS(shpnt).ldn_write_access, 0x0, sizeof(IBM_DS(shpnt).ldn_write_access));
	memset(IBM_DS(shpnt).ldn_inquiry_access, 0x0, sizeof(IBM_DS(shpnt).ldn_inquiry_access));
	memset(IBM_DS(shpnt).ldn_modeselect_access, 0x0, sizeof(IBM_DS(shpnt).ldn_modeselect_access));
	memset(IBM_DS(shpnt).ldn_assignments, 0x0, sizeof(IBM_DS(shpnt).ldn_assignments));
	probe_display(0);
	return;
}
/*
 * Inquire the device at the given ldn and, if it is a supported type,
 * report its peripheral type and (for block devices) block length.
 *
 * Returns 1 and fills *device_type / *block_length on success;
 * returns 0 if no device answered, the type is unsupported, or a
 * required READ CAPACITY failed.
 */
static int device_exists(struct Scsi_Host *shpnt, int ldn, int *block_length, int *device_type)
{
	unsigned char *buf;

	/* if no valid device found, return immediately with 0 */
	if (!(device_inquiry(shpnt, ldn)))
		return 0;
	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
	switch (*buf) {
	case TYPE_ROM:		/* standard blocksize for yellow-/red-book */
	case TYPE_WORM:
		*device_type = *buf;
		*block_length = 2048;
		return 1;
	case TYPE_DISK:
	case TYPE_MOD:
		/* record the type before READ CAPACITY overwrites buf */
		*device_type = *buf;
		if (!read_capacity(shpnt, ldn))
			return 0;
		*block_length = buf[7] + (buf[6] << 8) + (buf[5] << 16) + (buf[4] << 24);
		return 1;
	case TYPE_TAPE:
		*device_type = TYPE_TAPE;
		*block_length = 0;	/* not in use (setting by mt and mtst in op.) */
		return 1;
	case TYPE_PROCESSOR:
		*device_type = TYPE_PROCESSOR;
		*block_length = 0;	/* they set their stuff on drivers */
		return 1;
	case TYPE_SCANNER:
		*device_type = TYPE_SCANNER;
		*block_length = 0;	/* they set their stuff on drivers */
		return 1;
	case TYPE_MEDIUM_CHANGER:
		*device_type = TYPE_MEDIUM_CHANGER;
		*block_length = 0;	/* One never knows, what to expect on a medium
					   changer device. */
		return 1;
	default:
		return 0;	/* unsupported peripheral type */
	}
}
/*
 * Parse driver options, either from a comma-separated option string
 * (keywords plus alternating io-port / scsi-id numbers) or from a
 * numeric ints[] array (ints[0] = count, then io/id pairs).
 *
 * Fixes over the previous version (ints branch):
 *  - io_port[i] and scsi_id[i] both read ints[2*i+2]; the string branch
 *    shows the intended pairing is io = ints[2*i+1], id = ints[2*i+2].
 *  - the bound "2*i+2 < ints[0]" skipped the last complete pair;
 *    reading index 2*i+2 only requires 2*i+2 <= ints[0].
 * NOTE(review): matches the pairing used by the string parser — confirm
 * against the documented "ibmmcascsi=io,id,..." boot syntax.
 */
static void internal_ibmmca_scsi_setup(char *str, int *ints)
{
	int i, j, io_base, id_base;
	char *token;

	io_base = 0;
	id_base = 0;
	if (str) {
		j = 0;
		while ((token = strsep(&str, ",")) != NULL) {
			if (!strcmp(token, "activity"))
				display_mode |= LED_ACTIVITY;
			if (!strcmp(token, "display"))
				display_mode |= LED_DISP;
			if (!strcmp(token, "adisplay"))
				display_mode |= LED_ADISP;
			if (!strcmp(token, "normal"))
				ibm_ansi_order = 0;
			if (!strcmp(token, "ansi"))
				ibm_ansi_order = 1;
			if (!strcmp(token, "fast"))
				global_adapter_speed = 0;
			if (!strcmp(token, "medium"))
				global_adapter_speed = 4;
			if (!strcmp(token, "slow"))
				global_adapter_speed = 7;
			/* numbers alternate: even position = io port, odd = scsi id */
			if ((*token == '-') || (isdigit(*token))) {
				if (!(j % 2) && (io_base < IM_MAX_HOSTS))
					io_port[io_base++] = simple_strtoul(token, NULL, 0);
				if ((j % 2) && (id_base < IM_MAX_HOSTS))
					scsi_id[id_base++] = simple_strtoul(token, NULL, 0);
				j++;
			}
		}
	} else if (ints) {
		for (i = 0; i < IM_MAX_HOSTS && 2 * i + 2 <= ints[0]; i++) {
			io_port[i] = ints[2 * i + 1];
			scsi_id[i] = ints[2 * i + 2];
		}
	}
}
#if 0
FIXME NEED TO MOVE TO SYSFS
/*
 * Format adapter information for the (old MCA procfs) slot file.
 * Dead code: the whole function sits inside "#if 0" pending a sysfs
 * port (see FIXME above).  Kept verbatim for reference.
 *
 * Writes a description of the adapter category, chip revision/status,
 * PUN and I/O range into 'buf' under the host lock; returns the number
 * of characters written, padded so the length is int-aligned.
 */
static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
{
	struct Scsi_Host *shpnt;
	int len, speciale, connectore, k;
	unsigned int pos[8];
	unsigned long flags;
	struct Scsi_Host *dev = dev_id;
	spin_lock_irqsave(dev->host_lock, flags);
	shpnt = dev;		/* assign host-structure to local pointer */
	len = 0;		/* set filled text-buffer index to 0 */
	/* get the _special contents of the hostdata structure */
	speciale = ((struct ibmmca_hostdata *) shpnt->hostdata)->_special;
	connectore = ((struct ibmmca_hostdata *) shpnt->hostdata)->_connector_size;
	for (k = 2; k < 4; k++)
		pos[k] = ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k];
	if (speciale == FORCED_DETECTION) {	/* forced detection */
		len += sprintf(buf + len,
			       "Adapter category: forced detected\n" "***************************************\n" "***  Forced detected SCSI Adapter   ***\n" "***  No chip-information available  ***\n" "***************************************\n");
	} else if (speciale == INTEGRATED_SCSI) {
		/* if the integrated subsystem has been found automatically: */
		len += sprintf(buf + len,
			       "Adapter category: integrated\n" "Chip revision level: %d\n" "Chip status: %s\n" "8 kByte NVRAM status: %s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 1) ? "enabled" : "disabled", (pos[2] & 2) ? "locked" : "accessible");
	} else if ((speciale >= 0) && (speciale < ARRAY_SIZE(subsys_list))) {
		/* if the subsystem is a slot adapter */
		len += sprintf(buf + len, "Adapter category: slot-card\n" "ROM Segment Address: ");
		if ((pos[2] & 0xf0) == 0xf0)
			len += sprintf(buf + len, "off\n");
		else
			len += sprintf(buf + len, "0x%x\n", ((pos[2] & 0xf0) << 13) + 0xc0000);
		len += sprintf(buf + len, "Chip status: %s\n", (pos[2] & 1) ? "enabled" : "disabled");
		len += sprintf(buf + len, "Adapter I/O Offset: 0x%x\n", ((pos[2] & 0x0e) << 2));
	} else {
		len += sprintf(buf + len, "Adapter category: unknown\n");
	}
	/* common subsystem information to write to the slotn file */
	len += sprintf(buf + len, "Subsystem PUN: %d\n", shpnt->this_id);
	len += sprintf(buf + len, "I/O base address range: 0x%x-0x%x\n", (unsigned int) (shpnt->io_port), (unsigned int) (shpnt->io_port + 7));
	len += sprintf(buf + len, "MCA-slot size: %d bits", connectore);
	/* Now make sure, the bufferlength is devidable by 4 to avoid
	 * paging problems of the buffer. */
	while (len % sizeof(int) != (sizeof(int) - 1))
		len += sprintf(buf + len, " ");
	len += sprintf(buf + len, "\n");
	spin_unlock_irqrestore(shpnt->host_lock, flags);
	return len;
}
#endif
/* SCSI mid-layer host template for the IBM MCA subsystem.
 * Untagged operation: up to 16 queued commands adapter-wide but only
 * one command per LUN; the subsystem itself answers as PUN 7. */
static struct scsi_host_template ibmmca_driver_template = {
	.proc_name = "ibmmca",
	.proc_info = ibmmca_proc_info,
	.name = "IBM SCSI-Subsystem",
	.queuecommand = ibmmca_queuecommand,
	.eh_abort_handler = ibmmca_abort,
	.eh_host_reset_handler = ibmmca_host_reset,
	.bios_param = ibmmca_biosparam,
	.can_queue = 16,
	.this_id = 7,		/* default adapter PUN */
	.sg_tablesize = 16,
	.cmd_per_lun = 1,	/* no command queueing per device */
	.use_clustering = ENABLE_CLUSTERING,
};
/*
 * Device-model probe hook: locate the subsystem's I/O port, IRQ and
 * PUN from the POS registers, claim resources, allocate and register
 * the Scsi_Host, and scan for devices.
 *
 * Fix over the previous version: when scsi_add_host() failed, the
 * error path jumped to out_free_host and never released the IRQ that
 * request_irq() had successfully claimed — an IRQ leak.  A dedicated
 * out_free_irq label now undoes it.
 *
 * Returns 0 on success, -EINVAL on any failure.
 */
static int ibmmca_probe(struct device *dev)
{
	struct Scsi_Host *shpnt;
	int port, id, i, j, k, irq, enabled, ret = -EINVAL;
	struct mca_device *mca_dev = to_mca_device(dev);
	const char *description = ibmmca_description[mca_dev->index];
	/* First of all, print the version number of the driver. This is
	 * important to allow better user bugreports in case of already
	 * having problems with the MCA_bus probing. */
	printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION);
	/* The POS2-register of all PS/2 model SCSI-subsystems has the following
	 * interpretation of bits:
	 *                             Bit 7 - 4 : Chip Revision ID (Release)
	 *                             Bit 3 - 2 : Reserved
	 *                             Bit 1     : 8k NVRAM Disabled
	 *                             Bit 0     : Chip Enable (EN-Signal)
	 * The POS3-register is interpreted as follows:
	 *                             Bit 7 - 5 : SCSI ID
	 *                             Bit 4     : Reserved = 0
	 *                             Bit 3 - 0 : Reserved = 0
	 * (taken from "IBM, PS/2 Hardware Interface Technical Reference, Common
	 * Interfaces (1991)").
	 * In short words, this means, that IBM PS/2 machines only support
	 * 1 single subsystem by default. The slot-adapters must have another
	 * configuration on pos2. Here, one has to assume the following
	 * things for POS2-register:
	 *                             Bit 7 - 4 : Chip Revision ID (Release)
	 *                             Bit 3 - 1 : port offset factor
	 *                             Bit 0     : Chip Enable (EN-Signal)
	 * As I found a patch here, setting the IO-registers to 0x3540 forced,
	 * as there was a 0x05 in POS2 on a model 56, I assume, that the
	 * port 0x3540 must be fix for integrated SCSI-controllers.
	 * Ok, this discovery leads to the following implementation: (M.Lang) */
	/* first look for the IBM SCSI integrated subsystem on the motherboard */
	for (j = 0; j < 8; j++)	/* read the pos-information */
		pos[j] = mca_device_read_pos(mca_dev, j);
	id = (pos[3] & 0xe0) >> 5;	/* this is correct and represents the PUN */
	enabled = (pos[2] &0x01);
	if (!enabled) {
		printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
		printk(KERN_WARNING "              SCSI-operations may not work.\n");
	}
	/* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but
	 * if we ignore the settings of all surrounding pos registers, it is not
	 * completely sufficient to only check pos2 and pos3. */
	/* Therefore, now the following if statement is used to
	 * make sure, we see a real integrated onboard SCSI-interface and no
	 * internal system information, which gets mapped to some pos registers
	 * on models 95xx. */
	if (mca_dev->slot == MCA_INTEGSCSI &&
	    ((!pos[0] && !pos[1] && pos[2] > 0 &&
	      pos[3] > 0 && !pos[4] && !pos[5] &&
	      !pos[6] && !pos[7]) ||
	     (pos[0] == 0xff && pos[1] == 0xff &&
	      pos[2] < 0xff && pos[3] < 0xff &&
	      pos[4] == 0xff && pos[5] == 0xff &&
	      pos[6] == 0xff && pos[7] == 0xff))) {
		irq = IM_IRQ;
		port = IM_IO_PORT;
	} else {
		irq = IM_IRQ;
		port = IM_IO_PORT + ((pos[2] &0x0e) << 2);
		if ((mca_dev->index == IBM_SCSI2_FW) && (pos[6] != 0)) {
			printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
			printk(KERN_ERR "              Impossible to determine adapter PUN!\n");
			printk(KERN_ERR "              Guessing adapter PUN = 7.\n");
			id = 7;
		} else {
			id = (pos[3] & 0xe0) >> 5;	/* get subsystem PUN */
			if (mca_dev->index == IBM_SCSI2_FW) {
				id |= (pos[3] & 0x10) >> 1;	/* get subsystem PUN high-bit
								 * for F/W adapters */
			}
		}
		if ((mca_dev->index == IBM_SCSI2_FW) &&
		    (pos[4] & 0x01) && (pos[6] == 0)) {
			/* IRQ11 is used by SCSI-2 F/W Adapter/A */
			printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
			irq = IM_IRQ_FW;
		}
	}
	/* give detailed information on the subsystem. This helps me
	 * additionally during debugging and analyzing bug-reports. */
	printk(KERN_INFO "IBM MCA SCSI: %s found, io=0x%x, scsi id=%d,\n",
	       description, port, id);
	if (mca_dev->slot == MCA_INTEGSCSI)
		printk(KERN_INFO "              chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
	else {
		if ((pos[2] & 0xf0) == 0xf0)
			printk(KERN_DEBUG "              ROM Addr.=off,");
		else
			printk(KERN_DEBUG "              ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
		printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
	}
	/* check I/O region */
	if (!request_region(port, IM_N_IO_PORT, description)) {
		printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT);
		goto out_fail;
	}
	/* register host */
	shpnt = scsi_host_alloc(&ibmmca_driver_template,
				sizeof(struct ibmmca_hostdata));
	if (!shpnt) {
		printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n");
		goto out_release;
	}
	dev_set_drvdata(dev, shpnt);
	if(request_irq(irq, interrupt_handler, IRQF_SHARED, description, dev)) {
		printk(KERN_ERR "IBM MCA SCSI: failed to request interrupt %d\n", irq);
		goto out_free_host;
	}
	/* fill in host data */
	special(shpnt) = mca_dev->index;	/* important assignment or else crash! */
	subsystem_connector_size(shpnt) = 0;	/* preset slot-size */
	shpnt->irq = irq;	/* assign necessary stuff for the adapter */
	shpnt->io_port = port;
	shpnt->n_io_port = IM_N_IO_PORT;
	shpnt->this_id = id;
	shpnt->max_id = 8;	/* 8 PUNs are default */
	/* now, the SCSI-subsystem is connected to Linux */
#ifdef IM_DEBUG_PROBE
	/* NOTE(review): 'ctrl' and 'found' are not declared in this scope;
	 * this debug block does not compile when IM_DEBUG_PROBE is set. */
	ctrl = (unsigned int) (inb(IM_CTR_REG(found)));	/* get control-register status */
	printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found)));
	printk("IBM MCA SCSI: This adapters' POS-registers: ");
	for (i = 0; i < 8; i++)
		printk("%x ", pos[i]);
	printk("\n");
#endif
	reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
	for (i = 0; i < 16; i++)	/* reset the tables */
		for (j = 0; j < 8; j++)
			get_ldn(shpnt)[i][j] = MAX_LOG_DEV;
	/* check which logical devices exist */
	/* after this line, local interrupting is possible: */
	local_checking_phase_flag(shpnt) = 1;
	check_devices(shpnt, mca_dev->index);	/* call by value, using the global variable hosts */
	local_checking_phase_flag(shpnt) = 0;
	/* an ibm mca subsystem has been detected */
	for (k = 2; k < 7; k++)
		((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
	((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
	mca_device_set_name(mca_dev, description);
	/* FIXME: NEED TO REPLUMB TO SYSFS
	   mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
	 */
	mca_device_set_claim(mca_dev, 1);
	if (scsi_add_host(shpnt, dev)) {
		dev_printk(KERN_ERR, dev, "IBM MCA SCSI: scsi_add_host failed\n");
		goto out_free_irq;	/* IRQ is held at this point — release it */
	}
	scsi_scan_host(shpnt);
	return 0;
 out_free_irq:
	free_irq(irq, dev);
 out_free_host:
	scsi_host_put(shpnt);
 out_release:
	release_region(port, IM_N_IO_PORT);
 out_fail:
	return ret;
}
/*
 * ibmmca_remove - detach one adapter instance from the driver model.
 *
 * Tears down in the reverse order of probe: unregister from the SCSI
 * midlayer first, then give back the I/O region and the (shared) IRQ,
 * and finally drop the host reference taken at allocation time.
 */
static int __devexit ibmmca_remove(struct device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(dev);

	scsi_remove_host(host);
	release_region(host->io_port, host->n_io_port);
	free_irq(host->irq, dev);
	scsi_host_put(host);

	return 0;
}
/* The following routine is the SCSI command queue for the midlevel driver */
/*
 * ibmmca_queuecommand_lck - accept one SCSI command from the midlayer.
 *
 * Translates the midlayer (id, lun) to a subsystem logical device number
 * (LDN), performing a dynamic LDN re-assignment on the adapter when the
 * device has no LDN yet, then builds the subsystem control block (SCB)
 * for the command and issues it to the adapter.  Completion is reported
 * asynchronously via @done from the interrupt handler.
 *
 * Always returns 0; failures are delivered through cmd->result + done().
 * Called with the host lock held (see DEF_SCSI_QCMD wrapper below).
 */
static int ibmmca_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
	unsigned int ldn;
	unsigned int scsi_cmd;
	struct im_scb *scb;
	struct Scsi_Host *shpnt;
	int current_ldn;
	int id, lun;
	int target;
	int max_pun;
	int i;
	struct scatterlist *sg;

	shpnt = cmd->device->host;
	max_pun = subsystem_maxid(shpnt);
	/* translate the midlayer id to an adapter PUN, honoring the
	 * IBM/ANSI scan order if selected at boot time */
	if (ibm_ansi_order) {
		target = max_pun - 1 - cmd->device->id;
		if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
			target--;
		else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
			target++;
	} else
		target = cmd->device->id;
	/* if (target,lun) is NO LUN or not existing at all, return error */
	if ((get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
		cmd->result = DID_NO_CONNECT << 16;
		if (done)
			done(cmd);
		return 0;
	}
	/*if (target,lun) unassigned, do further checks... */
	ldn = get_ldn(shpnt)[target][cmd->device->lun];
	if (ldn >= MAX_LOG_DEV) {	/* on invalid ldn do special stuff */
		if (ldn > MAX_LOG_DEV) {	/* dynamical remapping if ldn unassigned */
			current_ldn = next_ldn(shpnt);	/* stop-value for one circle */
			while (ld(shpnt)[next_ldn(shpnt)].cmd) {	/* search for a occupied, but not in */
				/* command-processing ldn. */
				next_ldn(shpnt)++;
				if (next_ldn(shpnt) >= MAX_LOG_DEV)
					next_ldn(shpnt) = 7;
				if (current_ldn == next_ldn(shpnt)) {	/* One circle done ? */
					/* no non-processing ldn found */
					scmd_printk(KERN_WARNING, cmd,
						"IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n"
						" On ldn 7-14 SCSI-commands everywhere in progress.\n"
						" Reporting DID_NO_CONNECT for device.\n");
					cmd->result = DID_NO_CONNECT << 16;	/* return no connect */
					if (done)
						done(cmd);
					return 0;
				}
			}
			/* unmap non-processing ldn */
			for (id = 0; id < max_pun; id++)
				for (lun = 0; lun < 8; lun++) {
					if (get_ldn(shpnt)[id][lun] == next_ldn(shpnt)) {
						get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
						get_scsi(shpnt)[id][lun] = TYPE_NO_DEVICE;
						/* unmap entry */
					}
				}
			/* set reduced interrupt_handler-mode for checking */
			local_checking_phase_flag(shpnt) = 1;
			/* map found ldn to pun,lun */
			get_ldn(shpnt)[target][cmd->device->lun] = next_ldn(shpnt);
			/* change ldn to the right value, that is now next_ldn */
			ldn = next_ldn(shpnt);
			/* unassign all ldns (pun,lun,ldn does not matter for remove) */
			immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
			/* set only LDN for remapped device */
			immediate_assign(shpnt, target, cmd->device->lun, ldn, SET_LDN);
			/* get device information for ld[ldn] */
			if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
				ld(shpnt)[ldn].cmd = NULL;	/* To prevent panic set 0, because
							   devices that were not assigned,
							   should have nothing in progress. */
				get_scsi(shpnt)[target][cmd->device->lun] = ld(shpnt)[ldn].device_type;
				/* increase assignment counters for statistics in /proc */
				IBM_DS(shpnt).dynamical_assignments++;
				IBM_DS(shpnt).ldn_assignments[ldn]++;
			} else
				/* panic here, because a device, found at boottime has
				   vanished */
				panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun);
			/* unassign again all ldns (pun,lun,ldn does not matter for remove) */
			immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
			/* remap all ldns, as written in the pun/lun table */
			lun = 0;
#ifdef CONFIG_SCSI_MULTI_LUN
			for (lun = 0; lun < 8; lun++)
#endif
				for (id = 0; id < max_pun; id++) {
					if (get_ldn(shpnt)[id][lun] <= MAX_LOG_DEV)
						immediate_assign(shpnt, id, lun, get_ldn(shpnt)[id][lun], SET_LDN);
				}
			/* set back to normal interrupt_handling */
			local_checking_phase_flag(shpnt) = 0;
#ifdef IM_DEBUG_PROBE
			/* Information on syslog terminal */
			printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun);
#endif
			/* increase next_ldn for next dynamical assignment */
			next_ldn(shpnt)++;
			if (next_ldn(shpnt) >= MAX_LOG_DEV)
				next_ldn(shpnt) = 7;
		} else {	/* wall against Linux accesses to the subsystem adapter */
			cmd->result = DID_BAD_TARGET << 16;
			if (done)
				done(cmd);
			return 0;
		}
	}
	/*verify there is no command already in progress for this log dev */
	if (ld(shpnt)[ldn].cmd)
		panic("IBM MCA SCSI: cmd already in progress for this ldn.\n");
	/*save done in cmd, and save cmd for the interrupt handler */
	cmd->scsi_done = done;
	ld(shpnt)[ldn].cmd = cmd;
	/*fill scb information independent of the scsi command */
	scb = &(ld(shpnt)[ldn].scb);
	ld(shpnt)[ldn].tsb.dev_status = 0;
	scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE;
	scb->tsb_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].tsb));
	scsi_cmd = cmd->cmnd[0];
	/* build either a scatter/gather list (max. 16 entries, per-LDN
	 * sge[] table) or point the SCB at the single data buffer */
	if (scsi_sg_count(cmd)) {
		BUG_ON(scsi_sg_count(cmd) > 16);
		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
			ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg_page(sg)) + sg->offset);
			ld(shpnt)[ldn].sge[i].byte_length = sg->length;
		}
		scb->enable |= IM_POINTER_TO_LIST;
		scb->sys_buf_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].sge[0]));
		scb->sys_buf_length = scsi_sg_count(cmd) * sizeof(struct im_sge);
	} else {
		scb->sys_buf_adr = isa_virt_to_bus(scsi_sglist(cmd));
		/* recent Linux midlevel SCSI places 1024 byte for inquiry
		 * command. Far too much for old PS/2 hardware. */
		switch (scsi_cmd) {
			/* avoid command errors by setting bufferlengths to
			 * ANSI-standard. Beware of forcing it to 255,
			 * this could SEGV the kernel!!! */
		case INQUIRY:
		case REQUEST_SENSE:
		case MODE_SENSE:
		case MODE_SELECT:
			if (scsi_bufflen(cmd) > 255)
				scb->sys_buf_length = 255;
			else
				scb->sys_buf_length = scsi_bufflen(cmd);
			break;
		case TEST_UNIT_READY:
			scb->sys_buf_length = 0;
			break;
		default:
			scb->sys_buf_length = scsi_bufflen(cmd);
			break;
		}
	}
	/*fill scb information dependent on scsi command */
#ifdef IM_DEBUG_CMD
	printk("issue scsi cmd=%02x to ldn=%d\n", scsi_cmd, ldn);
#endif
	/* for specific device-type debugging: */
#ifdef IM_DEBUG_CMD_SPEC_DEV
	if (ld(shpnt)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
		printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(shpnt)[ldn].device_type, scsi_cmd, ldn);
#endif
	/* for possible panics store current command */
	last_scsi_command(shpnt)[ldn] = scsi_cmd;
	last_scsi_type(shpnt)[ldn] = IM_SCB;
	/* update statistical info */
	IBM_DS(shpnt).total_accesses++;
	IBM_DS(shpnt).ldn_access[ldn]++;
	switch (scsi_cmd) {
	case READ_6:
	case WRITE_6:
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
		/* Distinguish between disk and other devices. Only disks (that are the
		   most frequently accessed devices) should be supported by the
		   IBM-SCSI-Subsystem commands. */
		switch (ld(shpnt)[ldn].device_type) {
		case TYPE_DISK:	/* for harddisks enter here ... */
		case TYPE_MOD:	/* ... try it also for MO-drives (send flames as */
			/* you like, if this won't work.) */
			if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) {
				/* read command preparations */
				scb->enable |= IM_READ_CONTROL;
				IBM_DS(shpnt).ldn_read_access[ldn]++;	/* increase READ-access on ldn stat. */
				scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT;
			} else {	/* write command preparations */
				IBM_DS(shpnt).ldn_write_access[ldn]++;	/* increase write-count on ldn stat. */
				scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT;
			}
			/* 6-byte CDBs carry a 21-bit LBA and 8-bit count;
			 * 10/12-byte CDBs carry a 32-bit LBA and 16-bit count */
			if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) {
				scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[3]) << 0) | (((unsigned) cmd->cmnd[2]) << 8) | ((((unsigned) cmd->cmnd[1]) & 0x1f) << 16);
				scb->u2.blk.count = (unsigned) cmd->cmnd[4];
			} else {
				scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24);
				scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8);
			}
			last_scsi_logical_block(shpnt)[ldn] = scb->u1.log_blk_adr;
			last_scsi_blockcount(shpnt)[ldn] = scb->u2.blk.count;
			scb->u2.blk.length = ld(shpnt)[ldn].block_length;
			break;
			/* for other devices, enter here. Other types are not known by
			   Linux! TYPE_NO_LUN is forbidden as valid device. */
		case TYPE_ROM:
		case TYPE_TAPE:
		case TYPE_PROCESSOR:
		case TYPE_WORM:
		case TYPE_SCANNER:
		case TYPE_MEDIUM_CHANGER:
			/* If there is a sequential-device, IBM recommends to use
			   IM_OTHER_SCSI_CMD_CMD instead of subsystem READ/WRITE.
			   This includes CD-ROM devices, too, due to the partial sequential
			   read capabilities. */
			scb->command = IM_OTHER_SCSI_CMD_CMD;
			if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12)
				/* enable READ */
				scb->enable |= IM_READ_CONTROL;
			scb->enable |= IM_BYPASS_BUFFER;
			scb->u1.scsi_cmd_length = cmd->cmd_len;
			memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
			last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
			/* Read/write on this non-disk devices is also displayworthy,
			   so flash-up the LED/display. */
			break;
		}
		break;
	case INQUIRY:
		IBM_DS(shpnt).ldn_inquiry_access[ldn]++;
		scb->command = IM_DEVICE_INQUIRY_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		scb->u1.log_blk_adr = 0;
		break;
	case TEST_UNIT_READY:
		scb->command = IM_OTHER_SCSI_CMD_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		scb->u1.log_blk_adr = 0;
		scb->u1.scsi_cmd_length = 6;
		memcpy(scb->u2.scsi_command, cmd->cmnd, 6);
		last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
		break;
	case READ_CAPACITY:
		/* the length of system memory buffer must be exactly 8 bytes */
		scb->command = IM_READ_CAPACITY_CMD;
		scb->enable |= IM_READ_CONTROL | IM_BYPASS_BUFFER;
		if (scb->sys_buf_length > 8)
			scb->sys_buf_length = 8;
		break;
		/* Commands that need read-only-mode (system <- device): */
	case REQUEST_SENSE:
		scb->command = IM_REQUEST_SENSE_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		break;
		/* Commands that need write-only-mode (system -> device): */
	case MODE_SELECT:
	case MODE_SELECT_10:
		IBM_DS(shpnt).ldn_modeselect_access[ldn]++;
		scb->command = IM_OTHER_SCSI_CMD_CMD;
		scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;	/*Select needs WRITE-enabled */
		scb->u1.scsi_cmd_length = cmd->cmd_len;
		memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
		last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
		break;
		/* For other commands, read-only is useful. Most other commands are
		   running without an input-data-block. */
	default:
		scb->command = IM_OTHER_SCSI_CMD_CMD;
		scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
		scb->u1.scsi_cmd_length = cmd->cmd_len;
		memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
		last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
		break;
	}
	/*issue scb command, and return */
	if (++disk_rw_in_progress == 1)
		PS2_DISK_LED_ON(shpnt->host_no, target);
	if (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB) {
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
		IBM_DS(shpnt).long_scbs++;
	} else {
		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
		IBM_DS(shpnt).scbs++;
	}
	return 0;
}
static DEF_SCSI_QCMD(ibmmca_queuecommand)
/*
 * __ibmmca_abort - send an "abort" immediate command for one SCSI command.
 *
 * Called with the host lock held (see ibmmca_abort() wrapper).  Queues an
 * abort immediate command to the adapter, then drops the lock and polls
 * until the interrupt handler (running internal_done) sets cmd->SCp.Status.
 * Returns SUCCESS if the adapter reported DID_ABORT, FAILED otherwise.
 */
static int __ibmmca_abort(Scsi_Cmnd * cmd)
{
	/* Abort does not work, as the adapter never generates an interrupt on
	 * whatever situation is simulated, even when really pending commands
	 * are running on the adapters' hardware ! */
	struct Scsi_Host *shpnt;
	unsigned int ldn;
	void (*saved_done) (Scsi_Cmnd *);
	int target;
	int max_pun;
	unsigned long imm_command;
#ifdef IM_DEBUG_PROBE
	printk("IBM MCA SCSI: Abort subroutine called...\n");
#endif
	shpnt = cmd->device->host;
	max_pun = subsystem_maxid(shpnt);
	/* same id -> PUN translation as in queuecommand */
	if (ibm_ansi_order) {
		target = max_pun - 1 - cmd->device->id;
		if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
			target--;
		else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
			target++;
	} else
		target = cmd->device->id;
	/* get logical device number, and disable system interrupts */
	printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun);
	ldn = get_ldn(shpnt)[target][cmd->device->lun];
	/*if cmd for this ldn has already finished, no need to abort */
	if (!ld(shpnt)[ldn].cmd) {
		return SUCCESS;
	}
	/* Clear ld.cmd, save done function, install internal done,
	 * send abort immediate command (this enables sys. interrupts),
	 * and wait until the interrupt arrives.
	 */
	saved_done = cmd->scsi_done;
	cmd->scsi_done = internal_done;
	cmd->SCp.Status = 0;
	last_scsi_command(shpnt)[ldn] = IM_ABORT_IMM_CMD;
	last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
	imm_command = inl(IM_CMD_REG(shpnt));
	imm_command &= (unsigned long) (0xffff0000);	/* mask reserved stuff */
	imm_command |= (unsigned long) (IM_ABORT_IMM_CMD);
	/* must wait for attention reg not busy */
	/* FIXME - timeout, politeness */
	/* NOTE(review): unbounded busy-wait with the host lock held; if the
	 * adapter never clears IM_BUSY this spins forever. */
	while (1) {
		if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
			break;
	}
	/* write registers and enable system interrupts */
	outl(imm_command, IM_CMD_REG(shpnt));
	outb(IM_IMM_CMD | ldn, IM_ATTN_REG(shpnt));
#ifdef IM_DEBUG_PROBE
	printk("IBM MCA SCSI: Abort queued to adapter...\n");
#endif
	spin_unlock_irq(shpnt->host_lock);
	while (!cmd->SCp.Status)
		yield();
	spin_lock_irq(shpnt->host_lock);
	cmd->scsi_done = saved_done;
#ifdef IM_DEBUG_PROBE
	printk("IBM MCA SCSI: Abort returned with adapter response...\n");
#endif
	/*if abort went well, call saved done, then return success or error */
	if (cmd->result == (DID_ABORT << 16))
	{
		/* NOTE(review): this OR is redundant -- result already equals
		 * DID_ABORT << 16 by the condition above. */
		cmd->result |= DID_ABORT << 16;
		if (cmd->scsi_done)
			(cmd->scsi_done) (cmd);
		ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
		printk("IBM MCA SCSI: Abort finished with success.\n");
#endif
		return SUCCESS;
	} else {
		cmd->result |= DID_NO_CONNECT << 16;
		if (cmd->scsi_done)
			(cmd->scsi_done) (cmd);
		ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
		printk("IBM MCA SCSI: Abort failed.\n");
#endif
		return FAILED;
	}
}
/*
 * ibmmca_abort - midlayer eh_abort_handler entry point.
 *
 * Serializes against the interrupt handler by taking the host lock
 * around the real abort routine.
 */
static int ibmmca_abort(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int result;

	spin_lock_irq(host->host_lock);
	result = __ibmmca_abort(cmd);
	spin_unlock_irq(host->host_lock);

	return result;
}
/*
 * __ibmmca_host_reset - reset the whole subsystem (all attached devices).
 *
 * Called with the host lock held (see ibmmca_host_reset() wrapper).
 * Issues a reset immediate command and polls for completion, since the
 * interrupt handler cannot run while we hold the lock.  Returns SUCCESS
 * or FAILED per the midlayer error-handling contract.
 */
static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *shpnt;
	Scsi_Cmnd *cmd_aid;
	int ticks, i;
	unsigned long imm_command;

	BUG_ON(cmd == NULL);
	ticks = IM_RESET_DELAY * HZ;
	shpnt = cmd->device->host;
	/* resetting while the probe-time device scan runs would confuse it */
	if (local_checking_phase_flag(shpnt)) {
		printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n");
		return FAILED;
	}
	/* issue reset immediate command to subsystem, and wait for interrupt */
	printk("IBM MCA SCSI: resetting all devices.\n");
	reset_status(shpnt) = IM_RESET_IN_PROGRESS;
	last_scsi_command(shpnt)[0xf] = IM_RESET_IMM_CMD;
	last_scsi_type(shpnt)[0xf] = IM_IMM_CMD;
	imm_command = inl(IM_CMD_REG(shpnt));
	imm_command &= (unsigned long) (0xffff0000);	/* mask reserved stuff */
	imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
	/* must wait for attention reg not busy */
	while (1) {
		if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
			break;
		/* be polite: drop the lock while yielding the CPU */
		spin_unlock_irq(shpnt->host_lock);
		yield();
		spin_lock_irq(shpnt->host_lock);
	}
	/*write registers and enable system interrupts */
	outl(imm_command, IM_CMD_REG(shpnt));
	outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(shpnt));
	/* wait for interrupt finished or intr_stat register to be set, as the
	 * interrupt will not be executed, while we are in here! */
	/* FIXME: This is really really icky we so want a sleeping version of this ! */
	while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(shpnt)) & 0x8f) != 0x8f)) {
		udelay((1 + 999 / HZ) * 1000);	/* ~1 jiffy per iteration */
		barrier();
	}
	/* if reset did not complete, just return an error */
	if (!ticks) {
		printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
		reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
		return FAILED;
	}
	if ((inb(IM_INTR_REG(shpnt)) & 0x8f) == 0x8f) {
		/* analysis done by this routine and not by the intr-routine */
		if (inb(IM_INTR_REG(shpnt)) == 0xaf)
			reset_status(shpnt) = IM_RESET_FINISHED_OK_NO_INT;
		else if (inb(IM_INTR_REG(shpnt)) == 0xcf)
			reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
		else	/* failed, 4get it */
			reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
		outb(IM_EOI | 0xf, IM_ATTN_REG(shpnt));
	}
	/* if reset failed, just return an error */
	if (reset_status(shpnt) == IM_RESET_FINISHED_FAIL) {
		printk(KERN_ERR "IBM MCA SCSI: reset failed.\n");
		return FAILED;
	}
	/* so reset finished ok - call outstanding done's, and return success */
	printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n");
	/* NOTE(review): outstanding commands get DID_RESET set but their
	 * scsi_done() is not invoked here -- presumably the midlayer retries
	 * them after the successful host reset; confirm against scsi_eh. */
	for (i = 0; i < MAX_LOG_DEV; i++) {
		cmd_aid = ld(shpnt)[i].cmd;
		if (cmd_aid && cmd_aid->scsi_done) {
			ld(shpnt)[i].cmd = NULL;
			cmd_aid->result = DID_RESET << 16;
		}
	}
	return SUCCESS;
}
/*
 * ibmmca_host_reset - midlayer eh_host_reset_handler entry point.
 *
 * Takes the host lock around the real reset routine, mirroring
 * ibmmca_abort() above.
 */
static int ibmmca_host_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int result;

	spin_lock_irq(host->host_lock);
	result = __ibmmca_host_reset(cmd);
	spin_unlock_irq(host->host_lock);

	return result;
}
/*
 * ibmmca_biosparam - fake a BIOS drive geometry for fdisk and friends.
 *
 * Tries progressively larger head/sector combinations until the cylinder
 * count fits below 1024; with the largest geometry the cylinder count is
 * clamped to 1023.  info[] receives {heads, sectors, cylinders}.
 */
static int ibmmca_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *info)
{
	static const int geometry[][2] = {
		{ 64, 32 },	/* classic small-disk mapping */
		{ 128, 63 },
		{ 255, 63 },	/* largest translatable geometry */
	};
	int size = capacity;
	int g;

	for (g = 0; g < 3; g++) {
		info[0] = geometry[g][0];
		info[1] = geometry[g][1];
		info[2] = size / (info[0] * info[1]);
		if (info[2] < 1024)
			break;
	}
	if (info[2] >= 1024)
		info[2] = 1023;

	return 0;
}
/* Return the percentage of all subsystem accesses that went to @ldn
 * (0 when no accesses have been counted yet). */
static int ldn_access_load(struct Scsi_Host *shpnt, int ldn)
{
	int total = IBM_DS(shpnt).total_accesses;
	int hits = IBM_DS(shpnt).ldn_access[ldn];

	if (total == 0 || hits == 0)
		return 0;

	return (hits * 100) / total;
}
/* Sum the READ and WRITE access counters over all logical device numbers. */
static int ldn_access_total_read_write(struct Scsi_Host *shpnt)
{
	int total = 0;
	int ldn;

	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++)
		total += IBM_DS(shpnt).ldn_read_access[ldn]
		       + IBM_DS(shpnt).ldn_write_access[ldn];

	return total;
}
/* Sum the INQUIRY access counters over all logical device numbers. */
static int ldn_access_total_inquiry(struct Scsi_Host *shpnt)
{
	int total = 0;
	int ldn;

	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++)
		total += IBM_DS(shpnt).ldn_inquiry_access[ldn];

	return total;
}
/* Sum the MODE SELECT access counters over all logical device numbers. */
static int ldn_access_total_modeselect(struct Scsi_Host *shpnt)
{
	int total = 0;
	int ldn;

	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++)
		total += IBM_DS(shpnt).ldn_modeselect_access[ldn];

	return total;
}
/* routine to display info in the proc-fs-structure (a deluxe feature) */
/*
 * ibmmca_proc_info - render the driver's /proc statistics page.
 *
 * Fills @buffer (at @offset, limited to @length) with adapter statistics,
 * per-LDN access counters and the current PUN/LUN <-> LDN mapping.  The
 * host lock is held for a consistent snapshot.  @inout is unused (the
 * page is read-only).  Returns the number of bytes produced.
 */
static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
{
	int len = 0;
	int i, id, lun;
	unsigned long flags;
	int max_pun;

	spin_lock_irqsave(shpnt->host_lock, flags);	/* Check it */
	max_pun = subsystem_maxid(shpnt);
	len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION);
	len += sprintf(buffer + len, " SCSI Access-Statistics:\n");
	len += sprintf(buffer + len, " Device Scanning Order....: %s\n", (ibm_ansi_order) ? "IBM/ANSI" : "New Industry Standard");
#ifdef CONFIG_SCSI_MULTI_LUN
	len += sprintf(buffer + len, " Multiple LUN probing.....: Yes\n");
#else
	len += sprintf(buffer + len, " Multiple LUN probing.....: No\n");
#endif
	len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no);
	len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(shpnt)));
	len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ);
	len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(shpnt).total_interrupts);
	len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(shpnt).total_accesses);
	len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(shpnt).scbs);
	len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(shpnt).long_scbs);
	len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(shpnt));
	len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(shpnt));
	len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(shpnt));
	/* "other" commands = everything not counted by the three helpers */
	len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(shpnt).total_accesses - ldn_access_total_read_write(shpnt)
		       - ldn_access_total_modeselect(shpnt)
		       - ldn_access_total_inquiry(shpnt));
	len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(shpnt).total_errors);
	len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n");
	len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n");
	len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n");
	for (i = 0; i <= MAX_LOG_DEV; i++)
		len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(shpnt, i), IBM_DS(shpnt).ldn_read_access[i], IBM_DS(shpnt).ldn_write_access[i], IBM_DS(shpnt).ldn_assignments[i]);
	len += sprintf(buffer + len, " -----------------------------------------------------------\n\n");
	len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n");
	len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(shpnt).total_scsi_devices);
	len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(shpnt).dyn_flag ? "Yes" : "No ");
	len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(shpnt));
	len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(shpnt).dynamical_assignments);
	len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n");
	len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n");
	len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
	for (id = 0; id < max_pun; id++) {
		len += sprintf(buffer + len, " %2d ", id);
		for (lun = 0; lun < 8; lun++)
			len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(shpnt)[id][lun]));
		len += sprintf(buffer + len, " %2d ", id);
		for (lun = 0; lun < 8; lun++)
			len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(shpnt)[id][lun]));
		len += sprintf(buffer + len, "\n");
	}
	len += sprintf(buffer + len, "(A = IBM-Subsystem, D = Harddisk, T = Tapedrive, P = Processor, W = WORM,\n");
	len += sprintf(buffer + len, " R = CD-ROM, S = Scanner, M = MO-Drive, C = Medium-Changer, + = unprovided LUN,\n");
	len += sprintf(buffer + len, " - = nothing found, nothing assigned or unprobed LUN)\n\n");
	/* classic proc_info windowing: hand back only the requested slice */
	*start = buffer + offset;
	len -= offset;
	if (len > length)
		len = length;
	spin_unlock_irqrestore(shpnt->host_lock, flags);
	return len;
}
/*
 * option_setup - parse the "ibmmcascsi=" kernel command-line option.
 *
 * Collects up to IM_MAX_HOSTS - 1 leading comma-separated integers into
 * ints[1..], stores their count in ints[0], and hands the remainder of
 * the string plus the numbers to internal_ibmmca_scsi_setup().
 */
static int option_setup(char *str)
{
	int ints[IM_MAX_HOSTS];
	char *p;
	int n = 1;

	for (p = str; p && isdigit(*p) && n < IM_MAX_HOSTS; ) {
		ints[n++] = simple_strtoul(p, NULL, 0);
		p = strchr(p, ',');
		if (p)
			p++;	/* step past the separator */
	}
	ints[0] = n - 1;
	internal_ibmmca_scsi_setup(p, ints);
	return 1;
}
/* Register the "ibmmcascsi=" boot option parser. */
__setup("ibmmcascsi=", option_setup);
/* MCA bus driver glue: matches adapters from ibmmca_id_table and routes
 * probe/remove callbacks into this driver. */
static struct mca_driver ibmmca_driver = {
	.id_table = ibmmca_id_table,
	.driver = {
		.name = "ibmmca",
		.bus = &mca_bus_type,
		.probe = ibmmca_probe,
		.remove = __devexit_p(ibmmca_remove),
	},
};
/*
 * ibmmca_init - module entry point.
 *
 * When built as a module, first applies options collected in
 * boot_options; then registers the MCA driver, additionally probing the
 * integrated SCSI slot (MCA_INTEGSCSI).
 */
static int __init ibmmca_init(void)
{
#ifdef MODULE
	/* If the driver is run as module, read from conf.modules or cmd-line */
	if (boot_options)
		option_setup(boot_options);
#endif
	return mca_register_driver_integrated(&ibmmca_driver, MCA_INTEGSCSI);
}
/* ibmmca_exit - module exit point; unregisters the MCA bus driver,
 * which triggers ibmmca_remove() for each bound adapter. */
static void __exit ibmmca_exit(void)
{
	mca_unregister_driver(&ibmmca_driver);
}
module_init(ibmmca_init);
module_exit(ibmmca_exit);
| gpl-2.0 |
amalappunni/msm8916_jalebi | drivers/md/dm-sysfs.c | 3598 | 2335 | /*
* Copyright (C) 2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include <linux/sysfs.h>
#include <linux/dm-ioctl.h>
#include "dm.h"
/* Per-attribute descriptor: embeds a generic sysfs attribute (must stay
 * the first member -- container_of() in dm_attr_show() relies on it)
 * plus typed show/store callbacks taking a mapped_device. */
struct dm_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct mapped_device *, char *);
	ssize_t (*store)(struct mapped_device *, char *);
};
/* Declare a read-only dm attribute named _name, wired to
 * dm_attr_<_name>_show(). */
#define DM_ATTR_RO(_name) \
struct dm_sysfs_attr dm_attr_##_name = \
	__ATTR(_name, S_IRUGO, dm_attr_##_name##_show, NULL)
/*
 * dm_attr_show - generic sysfs "show" dispatcher for dm attributes.
 *
 * Resolves the typed dm_sysfs_attr from the generic attribute, takes a
 * reference on the mapped_device behind the kobject, and forwards to the
 * attribute's show callback.  Returns -EIO for write-only attributes and
 * -EINVAL when the device is gone.
 */
static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
			    char *page)
{
	struct dm_sysfs_attr *dma;
	struct mapped_device *md;
	ssize_t n;

	dma = container_of(attr, struct dm_sysfs_attr, attr);
	if (!dma->show)
		return -EIO;

	md = dm_get_from_kobject(kobj);
	if (!md)
		return -EINVAL;

	n = dma->show(md, page);
	dm_put(md);

	return n;
}
/* Show the device name followed by a newline; -EIO if it can't be read. */
static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf)
{
	size_t n;

	if (dm_copy_name_and_uuid(md, buf, NULL))
		return -EIO;

	n = strlen(buf);
	buf[n] = '\n';
	buf[n + 1] = '\0';
	return n + 1;
}
/* Show the device UUID followed by a newline; -EIO if it can't be read. */
static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
{
	size_t n;

	if (dm_copy_name_and_uuid(md, NULL, buf))
		return -EIO;

	n = strlen(buf);
	buf[n] = '\n';
	buf[n + 1] = '\0';
	return n + 1;
}
/* Show the suspended flag as "0\n" or "1\n".  sprintf() returns the
 * number of characters written, which equals strlen(buf) here. */
static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%d\n", dm_suspended_md(md));
}
/* Instantiate the three read-only attributes exported under /sys. */
static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
static DM_ATTR_RO(suspended);
/* NULL-terminated list handed to the kobj_type as default attributes. */
static struct attribute *dm_attrs[] = {
	&dm_attr_name.attr,
	&dm_attr_uuid.attr,
	&dm_attr_suspended.attr,
	NULL,
};
/* Read-only sysfs ops: no .store, writes are rejected by the VFS layer. */
static const struct sysfs_ops dm_sysfs_ops = {
	.show	= dm_attr_show,
};
/*
 * dm kobject is embedded in mapped_device structure
 * no need to define release function here
 */
static struct kobj_type dm_ktype = {
	.sysfs_ops	= &dm_sysfs_ops,
	.default_attrs	= dm_attrs,
	.release	= dm_kobject_release,
};
/*
 * Initialize kobj
 * because nobody using md yet, no need to call explicit dm_get/put
 */
/* Creates the "dm" kobject under the device's gendisk sysfs directory.
 * Returns 0 or a kobject_init_and_add() error code. */
int dm_sysfs_init(struct mapped_device *md)
{
	return kobject_init_and_add(dm_kobject(md), &dm_ktype,
				    &disk_to_dev(dm_disk(md))->kobj,
				    "%s", "dm");
}
/*
 * Remove kobj, called after all references removed
 */
/* Drops the kobject reference and then blocks until its release callback
 * has completed, so the embedding mapped_device can be freed safely. */
void dm_sysfs_exit(struct mapped_device *md)
{
	struct kobject *kobj = dm_kobject(md);
	kobject_put(kobj);
	wait_for_completion(dm_get_completion_from_kobject(kobj));
}
| gpl-2.0 |
schqiushui/kernel_sense_kk_m8ace | drivers/mtd/ubi/cdev.c | 4622 | 25353 | /*
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file includes implementation of UBI character device operations.
*
* There are two kinds of character devices in UBI: UBI character devices and
* UBI volume character devices. UBI character devices allow users to
* manipulate whole volumes: create, remove, and re-size them. Volume character
* devices provide volume I/O capabilities.
*
* Major and minor numbers are assigned dynamically to both UBI and volume
* character devices.
*
* Well, there is the third kind of character devices - the UBI control
* character device, which allows to manipulate by UBI devices - create and
* delete them. In other words, it is used for attaching and detaching MTD
* devices.
*/
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"
/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes UBI volume open mode to "exclusive". Returns previous
 * mode value (positive integer) in case of success and a negative error code
 * in case of failure.
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
	struct ubi_volume *vol = desc->vol;
	int users, ret;

	spin_lock(&vol->ubi->volumes_lock);
	users = vol->readers + vol->writers + vol->exclusive;
	ubi_assert(users > 0);
	if (users == 1) {
		/* We are the only opener - switch to exclusive mode. */
		vol->readers = vol->writers = 0;
		vol->exclusive = 1;
		ret = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	} else {
		dbg_err("%d users for volume %d", users, vol->vol_id);
		ret = -EBUSY;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return ret;
}
/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 */
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	/* The caller must currently hold the volume exclusively. */
	ubi_assert(vol->readers == 0 && vol->writers == 0);
	ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	vol->exclusive = 0;
	switch (mode) {
	case UBI_READONLY:
		vol->readers = 1;
		break;
	case UBI_READWRITE:
		vol->writers = 1;
		break;
	default:
		vol->exclusive = 1;
		break;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	desc->mode = mode;
}
/*
 * vol_cdev_open - open a UBI volume character device.
 *
 * Derives the UBI device number and volume id from the char device's
 * major/minor numbers, picks read-write or read-only mode from the file
 * flags, and stashes the opened volume descriptor in file->private_data.
 */
static int vol_cdev_open(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc;
	int mode;
	int vol_id = iminor(inode) - 1;
	int ubi_num = ubi_major2num(imajor(inode));

	if (ubi_num < 0)
		return ubi_num;

	mode = (file->f_mode & FMODE_WRITE) ? UBI_READWRITE : UBI_READONLY;

	dbg_gen("open device %d, volume %d, mode %d",
	        ubi_num, vol_id, mode);

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	file->private_data = desc;
	return 0;
}
/*
 * vol_cdev_release - close a UBI volume character device.
 *
 * If a volume update or an atomic LEB change was started but not
 * finished, cancel it and free the update buffer (an unfinished volume
 * update leaves the volume damaged), then close the volume descriptor.
 */
static int vol_cdev_release(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	dbg_gen("release device %d, volume %d, mode %d",
		vol->ubi->ubi_num, vol->vol_id, desc->mode);

	if (vol->updating) {
		ubi_warn("update of volume %d not finished, volume is damaged",
			 vol->vol_id);
		ubi_assert(!vol->changing_leb);
		vol->updating = 0;
		vfree(vol->upd_buf);
	} else if (vol->changing_leb) {
		dbg_gen("only %lld of %lld bytes received for atomic LEB change"
			" for volume %d:%d, cancel", vol->upd_received,
			vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
		vol->changing_leb = 0;
		vfree(vol->upd_buf);
	}

	ubi_close_volume(desc);
	return 0;
}
/*
 * vol_cdev_llseek - seek within a UBI volume character device.
 *
 * Supports SEEK_SET/SEEK_CUR/SEEK_END relative to the volume's used
 * bytes.  Seeking is refused with -EBUSY while a volume update is in
 * progress, and offsets outside [0, used_bytes] yield -EINVAL.
 */
static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	loff_t base, new_offset;

	if (vol->updating) {
		/* Update is in progress, seeking is prohibited */
		dbg_err("updating");
		return -EBUSY;
	}

	switch (origin) {
	case 0: /* SEEK_SET */
		base = 0;
		break;
	case 1: /* SEEK_CUR */
		base = file->f_pos;
		break;
	case 2: /* SEEK_END */
		base = vol->used_bytes;
		break;
	default:
		return -EINVAL;
	}
	new_offset = base + offset;

	if (new_offset < 0 || new_offset > vol->used_bytes) {
		dbg_err("bad seek %lld", new_offset);
		return -EINVAL;
	}

	dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
		vol->vol_id, offset, origin, new_offset);

	file->f_pos = new_offset;
	return new_offset;
}
/*
 * fsync() handler for a UBI volume character device: flush the whole
 * underlying UBI device. The @start/@end/@datasync arguments are not
 * used; the inode mutex serializes concurrent syncs.
 */
static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_device *ubi = desc->vol->ubi;
	struct inode *inode = file->f_path.dentry->d_inode;
	int ret;

	mutex_lock(&inode->i_mutex);
	ret = ubi_sync(ubi->ubi_num);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
/*
 * Read from a UBI volume character device. Data is read LEB by LEB
 * through a bounce buffer and copied to user-space. Reads are refused
 * while the volume is being updated or carries the update (damage)
 * marker. Returns the number of bytes actually read, or a negative
 * error code.
 */
static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
			     loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int err, lnum, off, len, tbuf_size;
	size_t count_save = count;
	void *tbuf;

	dbg_gen("read %zd bytes from offset %lld of volume %d",
		count, *offp, vol->vol_id);

	if (vol->updating) {
		dbg_err("updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		/* An earlier update was interrupted - data is unreliable */
		dbg_err("damaged volume, update marker is set");
		return -EBADF;
	}
	if (*offp == vol->used_bytes || count == 0)
		return 0;

	if (vol->corrupted)
		dbg_gen("read from corrupted volume %d", vol->vol_id);

	/* Clamp the request to the end of the volume's used data */
	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* Bounce buffer: one LEB at most, aligned to the min I/O unit */
	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;
	/* Split the file offset into a LEB number and an in-LEB offset */
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);

	do {
		cond_resched();

		/* Never read past the end of the current LEB */
		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		/*
		 * NOTE(review): @count and @*offp are advanced before
		 * copy_to_user(); on -EFAULT the file position has already
		 * moved past the chunk that failed to copy.
		 */
		count -= len;
		*offp += len;

		err = copy_to_user(buf, tbuf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	} while (count);

	vfree(tbuf);
	/* On success, report how many bytes of the request were consumed */
	return err ? err : count_save - count;
}
/*
 * This function allows to directly write to dynamic UBI volumes, without
 * issuing the volume update operation.
 *
 * Writes go LEB by LEB through a bounce buffer. Both the start offset
 * and the length must be multiples of the minimum I/O unit, the volume
 * must have direct writes enabled, and static volumes are rejected.
 * Returns the number of bytes written or a negative error code.
 */
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int lnum, off, len, tbuf_size, err = 0;
	size_t count_save = count;
	char *tbuf;

	if (!vol->direct_writes)
		return -EPERM;

	dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
		count, *offp, vol->vol_id);

	if (vol->vol_type == UBI_STATIC_VOLUME)
		return -EROFS;

	/* Split the file offset into a LEB number and an in-LEB offset */
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	if (off & (ubi->min_io_size - 1)) {
		dbg_err("unaligned position");
		return -EINVAL;
	}

	/* Clamp the request to the end of the volume's used data */
	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* We can write only in fractions of the minimum I/O unit */
	if (count & (ubi->min_io_size - 1)) {
		dbg_err("unaligned write length");
		return -EINVAL;
	}

	/* Bounce buffer: one LEB at most, aligned to the min I/O unit */
	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;

	while (count) {
		cond_resched();

		/* Never write past the end of the current LEB */
		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = copy_from_user(tbuf, buf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
					UBI_UNKNOWN);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;
		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	}

	vfree(tbuf);
	/* On success, report how many bytes of the request were consumed */
	return err ? err : count_save - count;
}
/*
 * Write handler for a UBI volume character device. When no volume
 * update or atomic LEB change is in progress this degrades to a plain
 * direct write. Otherwise the data is fed to the ongoing update / LEB
 * change; when the operation completes, exclusive access is dropped
 * and (for updates) the volume is re-checked for corruption.
 */
static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *offp)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;

	if (!vol->updating && !vol->changing_leb)
		return vol_cdev_direct_write(file, buf, count, offp);

	if (vol->updating)
		err = ubi_more_update_data(ubi, vol, buf, count);
	else
		err = ubi_more_leb_change_data(ubi, vol, buf, count);

	if (err < 0) {
		ubi_err("cannot accept more %zd bytes of data, error %d",
			count, err);
		return err;
	}

	if (err) {
		/*
		 * The operation is finished, @err contains number of actually
		 * written bytes.
		 */
		count = err;

		if (vol->changing_leb) {
			/* Atomic LEB change done - just drop exclusivity */
			revoke_exclusive(desc, UBI_READWRITE);
			return count;
		}

		/* Volume update done - verify the volume contents */
		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;

		if (err) {
			ubi_warn("volume %d on UBI device %d is corrupted",
				 vol->vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		vol->checked = 1;
		ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
		revoke_exclusive(desc, UBI_READWRITE);
	}

	return count;
}
/*
 * ioctl handler for a UBI volume character device. Supported commands:
 * volume update (UBI_IOCVOLUP), atomic LEB change (UBI_IOCEBCH), LEB
 * erase/map/unmap/is-mapped, and setting volume properties. Returns
 * zero or a positive command-specific value on success, and a negative
 * error code on failure.
 */
static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Volume update command */
	case UBI_IOCVOLUP:
	{
		int64_t bytes, rsvd_bytes;

		if (!capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			break;
		}

		err = copy_from_user(&bytes, argp, sizeof(int64_t));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY) {
			err = -EROFS;
			break;
		}

		/* Total payload must fit into the volume's reserved PEBs */
		rsvd_bytes = (long long)vol->reserved_pebs *
			     ubi->leb_size - vol->data_pad;
		if (bytes < 0 || bytes > rsvd_bytes) {
			err = -EINVAL;
			break;
		}

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_update(ubi, vol, bytes);
		/* A zero-byte update finishes immediately */
		if (bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Atomic logical eraseblock change command */
	case UBI_IOCEBCH:
	{
		struct ubi_leb_change_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_leb_change_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		/*
		 * Validate the request. Note: the second size check used to
		 * wrongly compare req.lnum (the LEB number, already
		 * range-checked above) against the LEB byte capacity; it
		 * must bound the data length, req.bytes.
		 */
		err = -EINVAL;
		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
			break;
		if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
		    req.dtype != UBI_UNKNOWN)
			break;

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_leb_change(ubi, vol, &req);
		/* A zero-byte change finishes immediately */
		if (req.bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Logical eraseblock erasure command */
	case UBI_IOCEBER:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		if (lnum < 0 || lnum >= vol->reserved_pebs) {
			err = -EINVAL;
			break;
		}

		dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			break;

		/* Make sure the PEB is physically erased before returning */
		err = ubi_wl_flush(ubi);
		break;
	}

	/* Logical eraseblock map command */
	case UBI_IOCEBMAP:
	{
		struct ubi_map_req req;

		err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_map(desc, req.lnum, req.dtype);
		break;
	}

	/* Logical eraseblock un-map command */
	case UBI_IOCEBUNMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_unmap(desc, lnum);
		break;
	}

	/* Check if logical eraseblock is mapped command */
	case UBI_IOCEBISMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_is_mapped(desc, lnum);
		break;
	}

	/* Set volume property command */
	case UBI_IOCSETVOLPROP:
	{
		struct ubi_set_vol_prop_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_set_vol_prop_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		switch (req.property) {
		case UBI_VOL_PROP_DIRECT_WRITE:
			mutex_lock(&ubi->device_mutex);
			desc->vol->direct_writes = !!req.value;
			mutex_unlock(&ubi->device_mutex);
			break;
		default:
			err = -EINVAL;
			break;
		}
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}
	return err;
}
/**
 * verify_mkvol_req - verify volume creation request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, %-EINVAL if the
 * request is malformed, and %-ENAMETOOLONG if the volume name is too long.
 */
static int verify_mkvol_req(const struct ubi_device *ubi,
			    const struct ubi_mkvol_req *req)
{
	int n, err = -EINVAL;

	if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
	    req->name_len < 0)
		goto bad;

	/* Volume ID must be in range, or auto-assigned */
	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
	    req->vol_id != UBI_VOL_NUM_AUTO)
		goto bad;

	if (req->alignment == 0)
		goto bad;

	if (req->bytes == 0)
		goto bad;

	if (req->vol_type != UBI_DYNAMIC_VOLUME &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->alignment > ubi->leb_size)
		goto bad;

	/* Alignment must be 1 or a multiple of the minimum I/O unit */
	n = req->alignment & (ubi->min_io_size - 1);
	if (req->alignment != 1 && n)
		goto bad;

	if (!req->name[0] || !req->name_len)
		goto bad;

	if (req->name_len > UBI_VOL_NAME_MAX) {
		err = -ENAMETOOLONG;
		goto bad;
	}

	/* Reject names containing embedded NUL bytes */
	n = strnlen(req->name, req->name_len + 1);
	if (n != req->name_len)
		goto bad;

	return 0;

bad:
	dbg_err("bad volume creation request");
	ubi_dbg_dump_mkvol_req(req);
	return err;
}
/**
 * verify_rsvol_req - verify volume re-size request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_rsvol_req(const struct ubi_device *ubi,
			    const struct ubi_rsvol_req *req)
{
	/* New size must be positive and the volume ID must be in range */
	if (req->bytes <= 0 ||
	    req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
		return -EINVAL;

	return 0;
}
/**
* rename_volumes - rename UBI volumes.
* @ubi: UBI device description object
* @req: volumes re-name request
*
* This is a helper function for the volume re-name IOCTL which validates the
* the request, opens the volume and calls corresponding volumes management
* function. Returns zero in case of success and a negative error code in case
* of failure.
*/
static int rename_volumes(struct ubi_device *ubi,
struct ubi_rnvol_req *req)
{
int i, n, err;
struct list_head rename_list;
struct ubi_rename_entry *re, *re1;
if (req->count < 0 || req->count > UBI_MAX_RNVOL)
return -EINVAL;
if (req->count == 0)
return 0;
/* Validate volume IDs and names in the request */
for (i = 0; i < req->count; i++) {
if (req->ents[i].vol_id < 0 ||
req->ents[i].vol_id >= ubi->vtbl_slots)
return -EINVAL;
if (req->ents[i].name_len < 0)
return -EINVAL;
if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
return -ENAMETOOLONG;
req->ents[i].name[req->ents[i].name_len] = '\0';
n = strlen(req->ents[i].name);
if (n != req->ents[i].name_len)
err = -EINVAL;
}
/* Make sure volume IDs and names are unique */
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
dbg_err("duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
dbg_err("duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
}
}
/* Create the re-name list */
INIT_LIST_HEAD(&rename_list);
for (i = 0; i < req->count; i++) {
int vol_id = req->ents[i].vol_id;
int name_len = req->ents[i].name_len;
const char *name = req->ents[i].name;
re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re) {
err = -ENOMEM;
goto out_free;
}
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
dbg_err("cannot open volume %d, error %d", vol_id, err);
kfree(re);
goto out_free;
}
/* Skip this re-naming if the name does not really change */
if (re->desc->vol->name_len == name_len &&
!memcmp(re->desc->vol->name, name, name_len)) {
ubi_close_volume(re->desc);
kfree(re);
continue;
}
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
if (list_empty(&rename_list))
return 0;
/* Find out the volumes which have to be removed */
list_for_each_entry(re, &rename_list, list) {
struct ubi_volume_desc *desc;
int no_remove_needed = 0;
/*
* Volume @re->vol_id is going to be re-named to
* @re->new_name, while its current name is @name. If a volume
* with name @re->new_name currently exists, it has to be
* removed, unless it is also re-named in the request (@req).
*/
list_for_each_entry(re1, &rename_list, list) {
if (re->new_name_len == re1->desc->vol->name_len &&
!memcmp(re->new_name, re1->desc->vol->name,
re1->desc->vol->name_len)) {
no_remove_needed = 1;
break;
}
}
if (no_remove_needed)
continue;
/*
* It seems we need to remove volume with name @re->new_name,
* if it exists.
*/
desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
if (err == -ENODEV)
/* Re-naming into a non-existing volume name */
continue;
/* The volume exists but busy, or an error occurred */
dbg_err("cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re1) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
dbg_msg("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
err = ubi_rename_volumes(ubi, &rename_list);
mutex_unlock(&ubi->device_mutex);
out_free:
list_for_each_entry_safe(re, re1, &rename_list, list) {
ubi_close_volume(re->desc);
list_del(&re->list);
kfree(re);
}
return err;
}
/*
 * ioctl handler for a UBI device character device. Supports creating,
 * removing, re-sizing and re-naming volumes. All commands require
 * CAP_SYS_RESOURCE. Returns zero on success and a negative error code
 * on failure.
 */
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_device *ubi;
	struct ubi_volume_desc *desc;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
	if (!ubi)
		return -ENODEV;

	switch (cmd) {
	/* Create volume command */
	case UBI_IOCMKVOL:
	{
		struct ubi_mkvol_req req;

		dbg_gen("create volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_mkvol_req(ubi, &req);
		if (err)
			break;

		mutex_lock(&ubi->device_mutex);
		err = ubi_create_volume(ubi, &req);
		mutex_unlock(&ubi->device_mutex);
		if (err)
			break;

		/* Report back the (possibly auto-assigned) volume ID */
		err = put_user(req.vol_id, (__user int32_t *)argp);
		if (err)
			err = -EFAULT;

		break;
	}

	/* Remove volume command */
	case UBI_IOCRMVOL:
	{
		int vol_id;

		dbg_gen("remove volume");
		err = get_user(vol_id, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		mutex_lock(&ubi->device_mutex);
		err = ubi_remove_volume(desc, 0);
		mutex_unlock(&ubi->device_mutex);

		/*
		 * The volume is deleted (unless an error occurred), and the
		 * 'struct ubi_volume' object will be freed when
		 * 'ubi_close_volume()' will call 'put_device()'.
		 */
		ubi_close_volume(desc);
		break;
	}

	/* Re-size volume command */
	case UBI_IOCRSVOL:
	{
		int pebs;
		struct ubi_rsvol_req req;

		dbg_gen("re-size volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_rsvol_req(ubi, &req);
		if (err)
			break;

		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		/* Round the requested byte count up to whole LEBs */
		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
			       desc->vol->usable_leb_size);

		mutex_lock(&ubi->device_mutex);
		err = ubi_resize_volume(desc, pebs);
		mutex_unlock(&ubi->device_mutex);
		ubi_close_volume(desc);
		break;
	}

	/* Re-name volumes command */
	case UBI_IOCRNVOL:
	{
		struct ubi_rnvol_req *req;

		dbg_msg("re-name volumes");
		/* The request is large, so it is allocated, not stacked */
		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			break;
		}

		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
		if (err) {
			err = -EFAULT;
			kfree(req);
			break;
		}

		err = rename_volumes(ubi, req);
		kfree(req);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	ubi_put_device(ubi);
	return err;
}
/*
 * ioctl handler for the UBI control character device. Supports
 * attaching an MTD device to UBI (UBI_IOCATT) and detaching one
 * (UBI_IOCDET). Both require CAP_SYS_RESOURCE. Returns zero on
 * success and a negative error code on failure.
 */
static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	switch (cmd) {
	/* Attach an MTD device command */
	case UBI_IOCATT:
	{
		struct ubi_attach_req req;
		struct mtd_info *mtd;

		dbg_gen("attach MTD device");
		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (req.mtd_num < 0 ||
		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
			err = -EINVAL;
			break;
		}

		mtd = get_mtd_device(NULL, req.mtd_num);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			break;
		}

		/*
		 * Note, further request verification is done by
		 * 'ubi_attach_mtd_dev()'.
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			/* Attach failed - drop our MTD device reference */
			put_mtd_device(mtd);
		else
			/* @err contains UBI device number */
			err = put_user(err, (__user int32_t *)argp);

		break;
	}

	/* Detach an MTD device command */
	case UBI_IOCDET:
	{
		int ubi_num;

		/* Fixed misspelled debug message ("dettach") */
		dbg_gen("detach MTD device");
		err = get_user(ubi_num, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);
		mutex_unlock(&ubi_devices_mutex);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
#ifdef CONFIG_COMPAT
/* 32-bit compat shim: translate the user pointer, then delegate. */
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return vol_cdev_ioctl(file, cmd,
			      (unsigned long)compat_ptr(arg));
}
/* 32-bit compat shim: translate the user pointer, then delegate. */
static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return ubi_cdev_ioctl(file, cmd,
			      (unsigned long)compat_ptr(arg));
}
/* 32-bit compat shim: translate the user pointer, then delegate. */
static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return ctrl_cdev_ioctl(file, cmd,
			       (unsigned long)compat_ptr(arg));
}
#else
#define vol_cdev_compat_ioctl NULL
#define ubi_cdev_compat_ioctl NULL
#define ctrl_cdev_compat_ioctl NULL
#endif
/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
	.owner          = THIS_MODULE,
	.open           = vol_cdev_open,
	.release        = vol_cdev_release,
	.llseek         = vol_cdev_llseek,
	.read           = vol_cdev_read,
	.write          = vol_cdev_write,
	.fsync		= vol_cdev_fsync,
	.unlocked_ioctl = vol_cdev_ioctl,
	.compat_ioctl   = vol_cdev_compat_ioctl,
};

/* UBI character device operations (per-device node; ioctl-only, no seek) */
const struct file_operations ubi_cdev_operations = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.unlocked_ioctl = ubi_cdev_ioctl,
	.compat_ioctl   = ubi_cdev_compat_ioctl,
};

/* UBI control character device operations (attach/detach MTD devices) */
const struct file_operations ubi_ctrl_cdev_operations = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ctrl_cdev_ioctl,
	.compat_ioctl   = ctrl_cdev_compat_ioctl,
	.llseek		= no_llseek,
};
| gpl-2.0 |
kjjjnob/android_kernel_lge_g3 | arch/arm/mach-exynos/mach-universal_c210.c | 4622 | 28507 | /* linux/arch/arm/mach-exynos4/mach-universal_c210.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/mfd/max8998.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/max8952.h>
#include <linux/mmc/host.h>
#include <linux/i2c-gpio.h>
#include <linux/i2c/mcs.h>
#include <linux/i2c/atmel_mxt_ts.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/iic.h>
#include <plat/gpio-cfg.h>
#include <plat/fb.h>
#include <plat/mfc.h>
#include <plat/sdhci.h>
#include <plat/pd.h>
#include <plat/regs-fb-v4.h>
#include <plat/fimc-core.h>
#include <plat/s5p-time.h>
#include <plat/camport.h>
#include <plat/mipi_csis.h>
#include <mach/map.h>
#include <media/v4l2-mediabus.h>
#include <media/s5p_fimc.h>
#include <media/m5mols.h>
#include <media/s5k6aa.h>
#include "common.h"
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define UNIVERSAL_UCON_DEFAULT	(S3C2410_UCON_TXILEVEL |	\
				 S3C2410_UCON_RXILEVEL |	\
				 S3C2410_UCON_TXIRQMODE |	\
				 S3C2410_UCON_RXIRQMODE |	\
				 S3C2410_UCON_RXFIFO_TOI |	\
				 S3C2443_UCON_RXERR_IRQEN)

#define UNIVERSAL_ULCON_DEFAULT	S3C2410_LCON_CS8

#define UNIVERSAL_UFCON_DEFAULT	(S3C2410_UFCON_FIFOMODE |	\
				 S5PV210_UFCON_TXTRIG256 |	\
				 S5PV210_UFCON_RXTRIG256)

/* Per-port UART configuration: all four ports share the same defaults */
static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
	[0] = {
		.hwport		= 0,
		.ucon		= UNIVERSAL_UCON_DEFAULT,
		.ulcon		= UNIVERSAL_ULCON_DEFAULT,
		.ufcon		= UNIVERSAL_UFCON_DEFAULT,
	},
	[1] = {
		.hwport		= 1,
		.ucon		= UNIVERSAL_UCON_DEFAULT,
		.ulcon		= UNIVERSAL_ULCON_DEFAULT,
		.ufcon		= UNIVERSAL_UFCON_DEFAULT,
	},
	[2] = {
		.hwport		= 2,
		.ucon		= UNIVERSAL_UCON_DEFAULT,
		.ulcon		= UNIVERSAL_ULCON_DEFAULT,
		.ufcon		= UNIVERSAL_UFCON_DEFAULT,
	},
	[3] = {
		.hwport		= 3,
		.ucon		= UNIVERSAL_UCON_DEFAULT,
		.ulcon		= UNIVERSAL_ULCON_DEFAULT,
		.ufcon		= UNIVERSAL_UFCON_DEFAULT,
	},
};
/* MAX8952 buck regulator feeding the ARM core voltage (vdd_arm) */
static struct regulator_consumer_supply max8952_consumer =
	REGULATOR_SUPPLY("vdd_arm", NULL);

static struct max8952_platform_data universal_max8952_pdata __initdata = {
	.gpio_vid0	= EXYNOS4_GPX0(3),
	.gpio_vid1	= EXYNOS4_GPX0(4),
	.gpio_en	= -1, /* Not controllable, set "Always High" */
	.default_mode	= 0, /* vid0 = 0, vid1 = 0 */
	.dvs_mode	= { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */
	.sync_freq	= 0, /* default: fastest */
	.ramp_speed	= 0, /* default: fastest */

	.reg_data	= {
		.constraints	= {
			.name		= "VARM_1.2V",
			.min_uV		= 770000,
			.max_uV		= 1400000,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
			.always_on	= 1,
			.boot_on	= 1,
		},
		.num_consumer_supplies	= 1,
		.consumer_supplies	= &max8952_consumer,
	},
};
/*
 * LP3974 (MAX8998-compatible) PMIC: consumer mappings and per-regulator
 * constraints for bucks 1-4 and LDOs 2-17, plus the 32kHz clocks,
 * charger indicator and safeout outputs. Values are board wiring facts;
 * do not change without consulting the Universal C210 schematics.
 */
static struct regulator_consumer_supply lp3974_buck1_consumer =
	REGULATOR_SUPPLY("vdd_int", NULL);

static struct regulator_consumer_supply lp3974_buck2_consumer =
	REGULATOR_SUPPLY("vddg3d", NULL);

static struct regulator_consumer_supply lp3974_buck3_consumer[] = {
	REGULATOR_SUPPLY("vdet", "s5p-sdo"),
	REGULATOR_SUPPLY("vdd_reg", "0-003c"),
};

/* Buck 1: internal logic rail (vdd_int), DVS-capable */
static struct regulator_init_data lp3974_buck1_data = {
	.constraints	= {
		.name		= "VINT_1.1V",
		.min_uV		= 750000,
		.max_uV		= 1500000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
				  REGULATOR_CHANGE_STATUS,
		.boot_on	= 1,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &lp3974_buck1_consumer,
};

/* Buck 2: 3D GPU rail (vddg3d), DVS-capable */
static struct regulator_init_data lp3974_buck2_data = {
	.constraints	= {
		.name		= "VG3D_1.1V",
		.min_uV		= 750000,
		.max_uV		= 1500000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
				  REGULATOR_CHANGE_STATUS,
		.boot_on	= 1,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &lp3974_buck2_consumer,
};

/* Buck 3: always-on fixed 1.8V rail */
static struct regulator_init_data lp3974_buck3_data = {
	.constraints	= {
		.name		= "VCC_1.8V",
		.min_uV		= 1800000,
		.max_uV		= 1800000,
		.apply_uV	= 1,
		.always_on	= 1,
		.state_mem	= {
			.enabled	= 1,
		},
	},
	.num_consumer_supplies	= ARRAY_SIZE(lp3974_buck3_consumer),
	.consumer_supplies	= lp3974_buck3_consumer,
};

/* Buck 4: fixed 1.2V memory rail */
static struct regulator_init_data lp3974_buck4_data = {
	.constraints	= {
		.name		= "VMEM_1.2V",
		.min_uV		= 1200000,
		.max_uV		= 1200000,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.apply_uV	= 1,
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_init_data lp3974_ldo2_data = {
	.constraints	= {
		.name		= "VALIVE_1.2V",
		.min_uV		= 1200000,
		.max_uV		= 1200000,
		.apply_uV	= 1,
		.always_on	= 1,
		.state_mem	= {
			.enabled	= 1,
		},
	},
};

static struct regulator_consumer_supply lp3974_ldo3_consumer[] = {
	REGULATOR_SUPPLY("vdd", "exynos4-hdmi"),
	REGULATOR_SUPPLY("vdd_pll", "exynos4-hdmi"),
	REGULATOR_SUPPLY("vdd11", "s5p-mipi-csis.0"),
};

static struct regulator_init_data lp3974_ldo3_data = {
	.constraints	= {
		.name		= "VUSB+MIPI_1.1V",
		.min_uV		= 1100000,
		.max_uV		= 1100000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= ARRAY_SIZE(lp3974_ldo3_consumer),
	.consumer_supplies	= lp3974_ldo3_consumer,
};

static struct regulator_consumer_supply lp3974_ldo4_consumer[] = {
	REGULATOR_SUPPLY("vdd_osc", "exynos4-hdmi"),
};

static struct regulator_init_data lp3974_ldo4_data = {
	.constraints	= {
		.name		= "VADC_3.3V",
		.min_uV		= 3300000,
		.max_uV		= 3300000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= ARRAY_SIZE(lp3974_ldo4_consumer),
	.consumer_supplies	= lp3974_ldo4_consumer,
};

static struct regulator_init_data lp3974_ldo5_data = {
	.constraints	= {
		.name		= "VTF_2.8V",
		.min_uV		= 2800000,
		.max_uV		= 2800000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_init_data lp3974_ldo6_data = {
	.constraints	= {
		.name		= "LDO6",
		.min_uV		= 2000000,
		.max_uV		= 2000000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_consumer_supply lp3974_ldo7_consumer[] = {
	REGULATOR_SUPPLY("vdd18", "s5p-mipi-csis.0"),
};

static struct regulator_init_data lp3974_ldo7_data = {
	.constraints	= {
		.name		= "VLCD+VMIPI_1.8V",
		.min_uV		= 1800000,
		.max_uV		= 1800000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= ARRAY_SIZE(lp3974_ldo7_consumer),
	.consumer_supplies	= lp3974_ldo7_consumer,
};

static struct regulator_consumer_supply lp3974_ldo8_consumer[] = {
	REGULATOR_SUPPLY("vdd33a_dac", "s5p-sdo"),
};

static struct regulator_init_data lp3974_ldo8_data = {
	.constraints	= {
		.name		= "VUSB+VDAC_3.3V",
		.min_uV		= 3300000,
		.max_uV		= 3300000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= ARRAY_SIZE(lp3974_ldo8_consumer),
	.consumer_supplies	= lp3974_ldo8_consumer,
};

static struct regulator_consumer_supply lp3974_ldo9_consumer =
	REGULATOR_SUPPLY("vddio", "0-003c");

static struct regulator_init_data lp3974_ldo9_data = {
	.constraints	= {
		.name		= "VCC_2.8V",
		.min_uV		= 2800000,
		.max_uV		= 2800000,
		.apply_uV	= 1,
		.always_on	= 1,
		.state_mem	= {
			.enabled	= 1,
		},
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &lp3974_ldo9_consumer,
};

static struct regulator_init_data lp3974_ldo10_data = {
	.constraints	= {
		.name		= "VPLL_1.1V",
		.min_uV		= 1100000,
		.max_uV		= 1100000,
		.boot_on	= 1,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_consumer_supply lp3974_ldo11_consumer =
	REGULATOR_SUPPLY("dig_28", "0-001f");

static struct regulator_init_data lp3974_ldo11_data = {
	.constraints	= {
		.name		= "CAM_AF_3.3V",
		.min_uV		= 3300000,
		.max_uV		= 3300000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &lp3974_ldo11_consumer,
};

static struct regulator_init_data lp3974_ldo12_data = {
	.constraints	= {
		.name		= "PS_2.8V",
		.min_uV		= 2800000,
		.max_uV		= 2800000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_init_data lp3974_ldo13_data = {
	.constraints	= {
		.name		= "VHIC_1.2V",
		.min_uV		= 1200000,
		.max_uV		= 1200000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_consumer_supply lp3974_ldo14_consumer =
	REGULATOR_SUPPLY("dig_18", "0-001f");

static struct regulator_init_data lp3974_ldo14_data = {
	.constraints	= {
		.name		= "CAM_I_HOST_1.8V",
		.min_uV		= 1800000,
		.max_uV		= 1800000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &lp3974_ldo14_consumer,
};

static struct regulator_consumer_supply lp3974_ldo15_consumer =
	REGULATOR_SUPPLY("dig_12", "0-001f");

static struct regulator_init_data lp3974_ldo15_data = {
	.constraints	= {
		.name		= "CAM_S_DIG+FM33_CORE_1.2V",
		.min_uV		= 1200000,
		.max_uV		= 1200000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &lp3974_ldo15_consumer,
};

static struct regulator_consumer_supply lp3974_ldo16_consumer[] = {
	REGULATOR_SUPPLY("vdda", "0-003c"),
	REGULATOR_SUPPLY("a_sensor", "0-001f"),
};

static struct regulator_init_data lp3974_ldo16_data = {
	.constraints	= {
		.name		= "CAM_S_ANA_2.8V",
		.min_uV		= 2800000,
		.max_uV		= 2800000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.disabled	= 1,
		},
	},
	.num_consumer_supplies	= ARRAY_SIZE(lp3974_ldo16_consumer),
	.consumer_supplies	= lp3974_ldo16_consumer,
};

static struct regulator_init_data lp3974_ldo17_data = {
	.constraints	= {
		.name		= "VCC_3.0V_LCD",
		.min_uV		= 3000000,
		.max_uV		= 3000000,
		.apply_uV	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.boot_on	= 1,
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

/* 32kHz clock outputs, charger indicator and USB safeout switches */
static struct regulator_init_data lp3974_32khz_ap_data = {
	.constraints	= {
		.name		= "32KHz AP",
		.always_on	= 1,
		.state_mem	= {
			.enabled	= 1,
		},
	},
};

static struct regulator_init_data lp3974_32khz_cp_data = {
	.constraints	= {
		.name		= "32KHz CP",
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_init_data lp3974_vichg_data = {
	.constraints	= {
		.name		= "VICHG",
		.state_mem	= {
			.disabled	= 1,
		},
	},
};

static struct regulator_init_data lp3974_esafeout1_data = {
	.constraints	= {
		.name		= "SAFEOUT1",
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.enabled	= 1,
		},
	},
};

static struct regulator_init_data lp3974_esafeout2_data = {
	.constraints	= {
		.name		= "SAFEOUT2",
		.boot_on	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem	= {
			.enabled	= 1,
		},
	},
};

/* Maps each MAX8998 regulator ID to its init data above */
static struct max8998_regulator_data lp3974_regulators[] = {
	{ MAX8998_LDO2,  &lp3974_ldo2_data },
	{ MAX8998_LDO3,  &lp3974_ldo3_data },
	{ MAX8998_LDO4,  &lp3974_ldo4_data },
	{ MAX8998_LDO5,  &lp3974_ldo5_data },
	{ MAX8998_LDO6,  &lp3974_ldo6_data },
	{ MAX8998_LDO7,  &lp3974_ldo7_data },
	{ MAX8998_LDO8,  &lp3974_ldo8_data },
	{ MAX8998_LDO9,  &lp3974_ldo9_data },
	{ MAX8998_LDO10, &lp3974_ldo10_data },
	{ MAX8998_LDO11, &lp3974_ldo11_data },
	{ MAX8998_LDO12, &lp3974_ldo12_data },
	{ MAX8998_LDO13, &lp3974_ldo13_data },
	{ MAX8998_LDO14, &lp3974_ldo14_data },
	{ MAX8998_LDO15, &lp3974_ldo15_data },
	{ MAX8998_LDO16, &lp3974_ldo16_data },
	{ MAX8998_LDO17, &lp3974_ldo17_data },
	{ MAX8998_BUCK1, &lp3974_buck1_data },
	{ MAX8998_BUCK2, &lp3974_buck2_data },
	{ MAX8998_BUCK3, &lp3974_buck3_data },
	{ MAX8998_BUCK4, &lp3974_buck4_data },
	{ MAX8998_EN32KHZ_AP, &lp3974_32khz_ap_data },
	{ MAX8998_EN32KHZ_CP, &lp3974_32khz_cp_data },
	{ MAX8998_ENVICHG, &lp3974_vichg_data },
	{ MAX8998_ESAFEOUT1, &lp3974_esafeout1_data },
	{ MAX8998_ESAFEOUT2, &lp3974_esafeout2_data },
};

/* Top-level LP3974 platform data: DVS GPIOs and preset buck voltages */
static struct max8998_platform_data universal_lp3974_pdata = {
	.num_regulators		= ARRAY_SIZE(lp3974_regulators),
	.regulators		= lp3974_regulators,
	.buck1_voltage1		= 1100000,	/* INT */
	.buck1_voltage2		= 1000000,
	.buck1_voltage3		= 1100000,
	.buck1_voltage4		= 1000000,
	.buck1_set1		= EXYNOS4_GPX0(5),
	.buck1_set2		= EXYNOS4_GPX0(6),
	.buck2_voltage1		= 1200000,	/* G3D */
	.buck2_voltage2		= 1100000,
	.buck1_default_idx	= 0,
	.buck2_set3		= EXYNOS4_GPE2(0),
	.buck2_default_idx	= 0,
	.wakeup			= true,
};
/* IDs for the board's GPIO-controlled fixed regulators */
enum fixed_regulator_id {
	FIXED_REG_ID_MMC0,
	FIXED_REG_ID_HDMI_5V,
	FIXED_REG_ID_CAM_S_IF,
	FIXED_REG_ID_CAM_I_CORE,
	FIXED_REG_ID_CAM_VT_DIO,
};

/* HDMI 5V supply, switched via GPIO HDMI_EN1 (active high) */
static struct regulator_consumer_supply hdmi_fixed_consumer =
	REGULATOR_SUPPLY("hdmi-en", "exynos4-hdmi");

static struct regulator_init_data hdmi_fixed_voltage_init_data = {
	.constraints		= {
		.name		= "HDMI_5V",
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &hdmi_fixed_consumer,
};

static struct fixed_voltage_config hdmi_fixed_voltage_config = {
	.supply_name		= "HDMI_EN1",
	.microvolts		= 5000000,
	.gpio			= EXYNOS4_GPE0(1),
	.enable_high		= true,
	.init_data		= &hdmi_fixed_voltage_init_data,
};

static struct platform_device hdmi_fixed_voltage = {
	.name			= "reg-fixed-voltage",
	.id			= FIXED_REG_ID_HDMI_5V,
	.dev			= {
		.platform_data	= &hdmi_fixed_voltage_config,
	},
};
/* GPIO I2C 5 (PMIC) */
static struct i2c_board_info i2c5_devs[] __initdata = {
{
I2C_BOARD_INFO("max8952", 0xC0 >> 1),
.platform_data = &universal_max8952_pdata,
}, {
I2C_BOARD_INFO("lp3974", 0xCC >> 1),
.platform_data = &universal_lp3974_pdata,
},
};
/* I2C3 (TSP) */
/* Atmel mXT224 touchscreen controller configuration for the 480x800 panel. */
static struct mxt_platform_data qt602240_platform_data = {
.x_line = 19,
.y_line = 11,
.x_size = 800,
.y_size = 480,
.blen = 0x11,
.threshold = 0x28,
.voltage = 2800000, /* 2.8V */
.orient = MXT_DIAGONAL,
.irqflags = IRQF_TRIGGER_FALLING,
};
static struct i2c_board_info i2c3_devs[] __initdata = {
{
/* .irq is filled in at boot by universal_tsp_init() */
I2C_BOARD_INFO("qt602240_ts", 0x4a),
.platform_data = &qt602240_platform_data,
},
};
/*
 * Power up the touchscreen LDO and route its interrupt pin, then record
 * the resulting IRQ number in the I2C3 board info before registration.
 * NOTE(review): gpio_request*() return values are ignored here — presumably
 * acceptable at early board init, but a failure would go unnoticed.
 */
static void __init universal_tsp_init(void)
{
int gpio;
/* TSP_LDO_ON: XMDMADDR_11 */
gpio = EXYNOS4_GPE2(3);
gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "TSP_LDO_ON");
gpio_export(gpio, 0);
/* TSP_INT: XMDMADDR_7 */
gpio = EXYNOS4_GPE1(7);
gpio_request(gpio, "TSP_INT");
/* register as a GPIO interrupt, then switch the pin to its IRQ function */
s5p_register_gpio_interrupt(gpio);
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
i2c3_devs[0].irq = gpio_to_irq(gpio);
}
/* GPIO I2C 12 (3 Touchkey) */
static uint32_t touchkey_keymap[] = {
/* MCS_KEY_MAP(value, keycode) */
MCS_KEY_MAP(0, KEY_MENU), /* KEY_SEND */
MCS_KEY_MAP(1, KEY_BACK), /* KEY_END */
};
static struct mcs_platform_data touchkey_data = {
.keymap = touchkey_keymap,
.keymap_size = ARRAY_SIZE(touchkey_keymap),
.key_maxval = 2,
};
/* GPIO I2C 3_TOUCH 2.8V */
/* Bit-banged I2C bus for the MCS5080 touchkey controller. */
#define I2C_GPIO_BUS_12 12
static struct i2c_gpio_platform_data i2c_gpio12_data = {
.sda_pin = EXYNOS4_GPE4(0), /* XMDMDATA_8 */
.scl_pin = EXYNOS4_GPE4(1), /* XMDMDATA_9 */
};
static struct platform_device i2c_gpio12 = {
.name = "i2c-gpio",
.id = I2C_GPIO_BUS_12,
.dev = {
.platform_data = &i2c_gpio12_data,
},
};
static struct i2c_board_info i2c_gpio12_devs[] __initdata = {
{
/* .irq is filled in at boot by universal_touchkey_init() */
I2C_BOARD_INFO("mcs5080_touchkey", 0x20),
.platform_data = &touchkey_data,
},
};
/*
 * Route the touchkey interrupt pin, record the IRQ in the gpio-i2c board
 * info, then enable the touchkey power rail GPIO.
 */
static void __init universal_touchkey_init(void)
{
int gpio;
gpio = EXYNOS4_GPE3(7); /* XMDMDATA_7 */
gpio_request(gpio, "3_TOUCH_INT");
s5p_register_gpio_interrupt(gpio);
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
i2c_gpio12_devs[0].irq = gpio_to_irq(gpio);
gpio = EXYNOS4_GPE3(3); /* XMDMDATA_3 */
gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "3_TOUCH_EN");
}
/* I2C0 bus parameters (300 kHz, 200 ns SDA delay). */
static struct s3c2410_platform_i2c universal_i2c0_platdata __initdata = {
.frequency = 300 * 1000,
.sda_delay = 200,
};
/* GPIO KEYS */
/* All keys are active-low external interrupts with 1 ms debounce. */
static struct gpio_keys_button universal_gpio_keys_tables[] = {
{
.code = KEY_VOLUMEUP,
.gpio = EXYNOS4_GPX2(0), /* XEINT16 */
.desc = "gpio-keys: KEY_VOLUMEUP",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_VOLUMEDOWN,
.gpio = EXYNOS4_GPX2(1), /* XEINT17 */
.desc = "gpio-keys: KEY_VOLUMEDOWN",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_CONFIG,
.gpio = EXYNOS4_GPX2(2), /* XEINT18 */
.desc = "gpio-keys: KEY_CONFIG",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_CAMERA,
.gpio = EXYNOS4_GPX2(3), /* XEINT19 */
.desc = "gpio-keys: KEY_CAMERA",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
}, {
.code = KEY_OK,
.gpio = EXYNOS4_GPX3(5), /* XEINT29 */
.desc = "gpio-keys: KEY_OK",
.type = EV_KEY,
.active_low = 1,
.debounce_interval = 1,
},
};
static struct gpio_keys_platform_data universal_gpio_keys_data = {
.buttons = universal_gpio_keys_tables,
.nbuttons = ARRAY_SIZE(universal_gpio_keys_tables),
};
static struct platform_device universal_gpio_keys = {
.name = "gpio-keys",
.dev = {
.platform_data = &universal_gpio_keys_data,
},
};
/* eMMC */
/* hsmmc0: soldered eMMC, 8-bit bus, no card-detect line. */
static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = {
.max_width = 8,
.host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
.host_caps2 = MMC_CAP2_BROKEN_VOLTAGE,
.cd_type = S3C_SDHCI_CD_PERMANENT,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
/* 2.8 V eMMC supply, gated by the MASSMEMORY_EN GPIO. */
static struct regulator_consumer_supply mmc0_supplies[] = {
REGULATOR_SUPPLY("vmmc", "exynos4-sdhci.0"),
};
static struct regulator_init_data mmc0_fixed_voltage_init_data = {
.constraints = {
.name = "VMEM_VDD_2.8V",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(mmc0_supplies),
.consumer_supplies = mmc0_supplies,
};
static struct fixed_voltage_config mmc0_fixed_voltage_config = {
.supply_name = "MASSMEMORY_EN",
.microvolts = 2800000,
.gpio = EXYNOS4_GPE1(3),
.enable_high = true,
.init_data = &mmc0_fixed_voltage_init_data,
};
static struct platform_device mmc0_fixed_voltage = {
.name = "reg-fixed-voltage",
.id = FIXED_REG_ID_MMC0,
.dev = {
.platform_data = &mmc0_fixed_voltage_config,
},
};
/* SD */
/* hsmmc2: removable SD slot with an active-low card-detect GPIO. */
static struct s3c_sdhci_platdata universal_hsmmc2_data __initdata = {
.max_width = 4,
.host_caps = MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.ext_cd_gpio = EXYNOS4_GPX3(4), /* XEINT_28 */
.ext_cd_gpio_invert = 1,
.cd_type = S3C_SDHCI_CD_GPIO,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
/* WiFi */
/* hsmmc3: SDIO WiFi; card presence is signalled by the WLAN driver. */
static struct s3c_sdhci_platdata universal_hsmmc3_data __initdata = {
.max_width = 4,
.host_caps = MMC_CAP_4_BIT_DATA |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.cd_type = S3C_SDHCI_CD_EXTERNAL,
};
/* Register platform data for the three SDHCI controllers used here. */
static void __init universal_sdhci_init(void)
{
s3c_sdhci0_set_platdata(&universal_hsmmc0_data);
s3c_sdhci2_set_platdata(&universal_hsmmc2_data);
s3c_sdhci3_set_platdata(&universal_hsmmc3_data);
}
/* I2C1 */
static struct i2c_board_info i2c1_devs[] __initdata = {
/* Gyro, To be updated */
};
/* Frame Buffer */
/* 480x800 portrait panel; virtual_y is doubled for page flipping. */
static struct s3c_fb_pd_win universal_fb_win0 = {
.win_mode = {
.left_margin = 16,
.right_margin = 16,
.upper_margin = 2,
.lower_margin = 28,
.hsync_len = 2,
.vsync_len = 1,
.xres = 480,
.yres = 800,
.refresh = 55,
},
.max_bpp = 32,
.default_bpp = 16,
.virtual_x = 480,
.virtual_y = 2 * 800,
};
static struct s3c_fb_platdata universal_lcd_pdata __initdata = {
.win[0] = &universal_fb_win0,
.vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
VIDCON0_CLKSEL_LCD,
.vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
| VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
.setup_gpio = exynos4_fimd0_gpio_setup_24bpp,
};
/* GPIO-gated fixed regulators for the two camera sensors. */
static struct regulator_consumer_supply cam_vt_dio_supply =
REGULATOR_SUPPLY("vdd_core", "0-003c");
static struct regulator_init_data cam_vt_dio_reg_init_data = {
.constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS },
.num_consumer_supplies = 1,
.consumer_supplies = &cam_vt_dio_supply,
};
static struct fixed_voltage_config cam_vt_dio_fixed_voltage_cfg = {
.supply_name = "CAM_VT_D_IO",
.microvolts = 2800000,
.gpio = EXYNOS4_GPE2(1), /* CAM_PWR_EN2 */
.enable_high = 1,
.init_data = &cam_vt_dio_reg_init_data,
};
static struct platform_device cam_vt_dio_fixed_reg_dev = {
.name = "reg-fixed-voltage", .id = FIXED_REG_ID_CAM_VT_DIO,
.dev = { .platform_data = &cam_vt_dio_fixed_voltage_cfg },
};
static struct regulator_consumer_supply cam_i_core_supply =
REGULATOR_SUPPLY("core", "0-001f");
static struct regulator_init_data cam_i_core_reg_init_data = {
.constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS },
.num_consumer_supplies = 1,
.consumer_supplies = &cam_i_core_supply,
};
static struct fixed_voltage_config cam_i_core_fixed_voltage_cfg = {
.supply_name = "CAM_I_CORE_1.2V",
.microvolts = 1200000,
.gpio = EXYNOS4_GPE2(2), /* CAM_8M_CORE_EN */
.enable_high = 1,
.init_data = &cam_i_core_reg_init_data,
};
static struct platform_device cam_i_core_fixed_reg_dev = {
.name = "reg-fixed-voltage", .id = FIXED_REG_ID_CAM_I_CORE,
.dev = { .platform_data = &cam_i_core_fixed_voltage_cfg },
};
static struct regulator_consumer_supply cam_s_if_supply =
REGULATOR_SUPPLY("d_sensor", "0-001f");
static struct regulator_init_data cam_s_if_reg_init_data = {
.constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS },
.num_consumer_supplies = 1,
.consumer_supplies = &cam_s_if_supply,
};
static struct fixed_voltage_config cam_s_if_fixed_voltage_cfg = {
.supply_name = "CAM_S_IF_1.8V",
.microvolts = 1800000,
.gpio = EXYNOS4_GPE3(0), /* CAM_PWR_EN1 */
.enable_high = 1,
.init_data = &cam_s_if_reg_init_data,
};
static struct platform_device cam_s_if_fixed_reg_dev = {
.name = "reg-fixed-voltage", .id = FIXED_REG_ID_CAM_S_IF,
.dev = { .platform_data = &cam_s_if_fixed_voltage_cfg },
};
/* MIPI-CSIS receiver configuration for the 8M sensor (2 data lanes). */
static struct s5p_platform_mipi_csis mipi_csis_platdata = {
.clk_rate = 166000000UL,
.lanes = 2,
.alignment = 32,
.hs_settle = 12,
.phy_enable = s5p_csis_phy_enable,
};
/* Camera control GPIOs; LEVEL_EN(n) maps to EXYNOS4_GPE4(n + 3). */
#define GPIO_CAM_LEVEL_EN(n) EXYNOS4_GPE4(n + 3)
#define GPIO_CAM_8M_ISP_INT EXYNOS4_GPX1(5) /* XEINT_13 */
#define GPIO_CAM_MEGA_nRST EXYNOS4_GPE2(5)
#define GPIO_CAM_VGA_NRST EXYNOS4_GPE4(7)
#define GPIO_CAM_VGA_NSTBY EXYNOS4_GPE4(6)
/* Gate the VGA sensor's level shifter: any non-zero @on enables it. */
static int s5k6aa_set_power(int on)
{
	int enable = on ? 1 : 0;

	gpio_set_value(GPIO_CAM_LEVEL_EN(2), enable);
	return 0;
}
/* S5K6AA VGA sensor: parallel bus, horizontally mirrored mounting. */
static struct s5k6aa_platform_data s5k6aa_platdata = {
.mclk_frequency = 21600000UL,
.gpio_reset = { GPIO_CAM_VGA_NRST, 0 },
.gpio_stby = { GPIO_CAM_VGA_NSTBY, 0 },
.bus_type = V4L2_MBUS_PARALLEL,
.horiz_flip = 1,
.set_power = s5k6aa_set_power,
};
static struct i2c_board_info s5k6aa_board_info = {
I2C_BOARD_INFO("S5K6AA", 0x3C),
.platform_data = &s5k6aa_platdata,
};
/*
 * Power switch for the 8M M5MOLS ISP: level shifter 1 is active-low
 * relative to @on, level shifter 2 follows @on.
 */
static int m5mols_set_power(struct device *dev, int on)
{
	int enable = on ? 1 : 0;

	gpio_set_value(GPIO_CAM_LEVEL_EN(1), !enable);
	gpio_set_value(GPIO_CAM_LEVEL_EN(2), enable);
	return 0;
}
static struct m5mols_platform_data m5mols_platdata = {
.gpio_reset = GPIO_CAM_MEGA_nRST,
.reset_polarity = 0,
.set_power = m5mols_set_power,
};
static struct i2c_board_info m5mols_board_info = {
/* .irq is filled in at boot by universal_camera_init() */
I2C_BOARD_INFO("M5MOLS", 0x1F),
.platform_data = &m5mols_platdata,
};
/* FIMC sensor descriptors: ITU-601 VGA sensor and MIPI-CSI2 8M ISP. */
static struct s5p_fimc_isp_info universal_camera_sensors[] = {
{
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
.bus_type = FIMC_ITU_601,
.board_info = &s5k6aa_board_info,
.i2c_bus_num = 0,
.clk_frequency = 24000000UL,
}, {
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
.bus_type = FIMC_MIPI_CSI2,
.board_info = &m5mols_board_info,
.i2c_bus_num = 0,
.clk_frequency = 24000000UL,
.csi_data_align = 32,
},
};
static struct s5p_platform_fimc fimc_md_platdata = {
.isp_info = universal_camera_sensors,
.num_clients = ARRAY_SIZE(universal_camera_sensors),
};
/* Requested in one batch by universal_camera_init(). */
static struct gpio universal_camera_gpios[] = {
{ GPIO_CAM_LEVEL_EN(1), GPIOF_OUT_INIT_HIGH, "CAM_LVL_EN1" },
{ GPIO_CAM_LEVEL_EN(2), GPIOF_OUT_INIT_LOW, "CAM_LVL_EN2" },
{ GPIO_CAM_8M_ISP_INT, GPIOF_IN, "8M_ISP_INT" },
{ GPIO_CAM_MEGA_nRST, GPIOF_OUT_INIT_LOW, "CAM_8M_NRST" },
{ GPIO_CAM_VGA_NRST, GPIOF_OUT_INIT_LOW, "CAM_VGA_NRST" },
{ GPIO_CAM_VGA_NSTBY, GPIOF_OUT_INIT_LOW, "CAM_VGA_NSTBY" },
};
/*
 * Set up MIPI-CSIS and FIMC platform data, claim the camera GPIOs,
 * configure the 8M ISP interrupt line, then release the GPIOs that the
 * sensor drivers themselves will request later.
 */
static void __init universal_camera_init(void)
{
s3c_set_platdata(&mipi_csis_platdata, sizeof(mipi_csis_platdata),
&s5p_device_mipi_csis0);
s3c_set_platdata(&fimc_md_platdata, sizeof(fimc_md_platdata),
&s5p_device_fimc_md);
if (gpio_request_array(universal_camera_gpios,
ARRAY_SIZE(universal_camera_gpios))) {
pr_err("%s: GPIO request failed\n", __func__);
return;
}
if (!s3c_gpio_cfgpin(GPIO_CAM_8M_ISP_INT, S3C_GPIO_SFN(0xf)))
m5mols_board_info.irq = gpio_to_irq(GPIO_CAM_8M_ISP_INT);
else
pr_err("Failed to configure 8M_ISP_INT GPIO\n");
/* Free GPIOs controlled directly by the sensor drivers. */
gpio_free(GPIO_CAM_MEGA_nRST);
gpio_free(GPIO_CAM_8M_ISP_INT);
gpio_free(GPIO_CAM_VGA_NRST);
gpio_free(GPIO_CAM_VGA_NSTBY);
if (exynos4_fimc_setup_gpio(S5P_CAMPORT_A))
pr_err("Camera port A setup failed\n");
}
/* Platform devices registered in one go at the end of machine init. */
static struct platform_device *universal_devices[] __initdata = {
/* Samsung Platform Devices */
&s5p_device_mipi_csis0,
&s5p_device_fimc0,
&s5p_device_fimc1,
&s5p_device_fimc2,
&s5p_device_fimc3,
&s5p_device_g2d,
&mmc0_fixed_voltage,
&s3c_device_hsmmc0,
&s3c_device_hsmmc2,
&s3c_device_hsmmc3,
&s3c_device_i2c0,
&s3c_device_i2c3,
&s3c_device_i2c5,
&s5p_device_i2c_hdmiphy,
&hdmi_fixed_voltage,
&s5p_device_hdmi,
&s5p_device_sdo,
&s5p_device_mixer,
/* Universal Devices */
&i2c_gpio12,
&universal_gpio_keys,
&s5p_device_onenand,
&s5p_device_fimd0,
&s5p_device_jpeg,
&s5p_device_mfc,
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&cam_vt_dio_fixed_reg_dev,
&cam_i_core_fixed_reg_dev,
&cam_s_if_fixed_reg_dev,
&s5p_device_fimc_md,
};
/* Early map_io hook: static mappings, clocks, UARTs and the system timer. */
static void __init universal_map_io(void)
{
clk_xusbxti.rate = 24000000;
exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
}
/* Route the HDMI hot-plug-detect pin to the HDMI block (SFN 0x3). */
static void s5p_tv_setup(void)
{
/* direct HPD to HDMI chip */
gpio_request_one(EXYNOS4_GPX3(7), GPIOF_IN, "hpd-plug");
s3c_gpio_cfgpin(EXYNOS4_GPX3(7), S3C_GPIO_SFN(0x3));
s3c_gpio_setpull(EXYNOS4_GPX3(7), S3C_GPIO_PULL_NONE);
}
/* Reserve two 8 MiB MFC firmware/buffer regions at fixed addresses. */
static void __init universal_reserve(void)
{
s5p_mfc_reserve_mem(0x43000000, 8 << 20, 0x51000000, 8 << 20);
}
/*
 * Board init: register per-subsystem platform data and I2C devices,
 * then add all platform devices last so their dependencies exist.
 */
static void __init universal_machine_init(void)
{
universal_sdhci_init();
s5p_tv_setup();
s3c_i2c0_set_platdata(&universal_i2c0_platdata);
i2c_register_board_info(1, i2c1_devs, ARRAY_SIZE(i2c1_devs));
/* universal_tsp_init() must run first: it fills in i2c3_devs[0].irq */
universal_tsp_init();
s3c_i2c3_set_platdata(NULL);
i2c_register_board_info(3, i2c3_devs, ARRAY_SIZE(i2c3_devs));
s3c_i2c5_set_platdata(NULL);
s5p_i2c_hdmiphy_set_platdata(NULL);
i2c_register_board_info(5, i2c5_devs, ARRAY_SIZE(i2c5_devs));
s5p_fimd0_set_platdata(&universal_lcd_pdata);
universal_touchkey_init();
i2c_register_board_info(I2C_GPIO_BUS_12, i2c_gpio12_devs,
ARRAY_SIZE(i2c_gpio12_devs));
universal_camera_init();
/* Last */
platform_add_devices(universal_devices, ARRAY_SIZE(universal_devices));
}
/* Machine descriptor consumed by the ARM boot code via the machine ID. */
MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
/* Maintainer: Kyungmin Park <kyungmin.park@samsung.com> */
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = universal_map_io,
.handle_irq = gic_handle_irq,
.init_machine = universal_machine_init,
.timer = &s5p_timer,
.reserve = &universal_reserve,
.restart = exynos4_restart,
MACHINE_END
| gpl-2.0 |
rlnelson-git/linux-nvme | drivers/macintosh/ams/ams-core.c | 4878 | 6091 | /*
* Apple Motion Sensor driver
*
* Copyright (C) 2005 Stelian Pop (stelian@popies.net)
* Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/pmac_pfunc.h>
#include "ams.h"
/* There is only one motion sensor per machine */
/* Shared driver state; protected by ams_info.lock / ams_info.irq_lock. */
struct ams ams_info;
static bool verbose;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Show free falls and shocks in kernel output");
/* Call with ams_info.lock held! */
/*
 * Read the raw accelerometer axes and normalise them according to the
 * device-tree orientation word: bit 7 swaps X/Y, bits 0-2 invert
 * X/Y/Z respectively (bitwise complement of the signed 8-bit reading).
 */
void ams_sensors(s8 *x, s8 *y, s8 *z)
{
/* vflag selects which of the two orientation words applies */
u32 orient = ams_info.vflag? ams_info.orient1 : ams_info.orient2;
if (orient & 0x80)
/* X and Y swapped */
ams_info.get_xyz(y, x, z);
else
ams_info.get_xyz(x, y, z);
if (orient & 0x04)
*z = ~(*z);
if (orient & 0x02)
*y = ~(*y);
if (orient & 0x01)
*x = ~(*x);
}
/* sysfs "current" attribute: one oriented X/Y/Z sample per read. */
static ssize_t ams_show_current(struct device *dev,
struct device_attribute *attr, char *buf)
{
s8 x, y, z;
mutex_lock(&ams_info.lock);
ams_sensors(&x, &y, &z);
mutex_unlock(&ams_info.lock);
return snprintf(buf, PAGE_SIZE, "%d %d %d\n", x, y, z);
}
static DEVICE_ATTR(current, S_IRUGO, ams_show_current, NULL);
/*
 * PMF interrupt callback: record which event fired and defer the real
 * work (and status clearing) to the workqueue, since the handler runs
 * in a context where the I2C/PMU bus cannot be used.
 */
static void ams_handle_irq(void *data)
{
enum ams_irq irq = *((enum ams_irq *)data);
spin_lock(&ams_info.irq_lock);
ams_info.worker_irqs |= irq;
schedule_work(&ams_info.worker);
spin_unlock(&ams_info.irq_lock);
}
/* One PMF client per interrupt source, both funneling into ams_handle_irq. */
static enum ams_irq ams_freefall_irq_data = AMS_IRQ_FREEFALL;
static struct pmf_irq_client ams_freefall_client = {
.owner = THIS_MODULE,
.handler = ams_handle_irq,
.data = &ams_freefall_irq_data,
};
static enum ams_irq ams_shock_irq_data = AMS_IRQ_SHOCK;
static struct pmf_irq_client ams_shock_client = {
.owner = THIS_MODULE,
.handler = ams_handle_irq,
.data = &ams_shock_irq_data,
};
/* Once hard disk parking is implemented in the kernel, this function can
 * trigger it.
 */
/*
 * Workqueue body: consume the pending IRQ bits under irq_lock, log them
 * if requested, then acknowledge them on the chip outside the spinlock
 * (clear_irq may sleep; ams_info.lock serialises bus access).
 */
static void ams_worker(struct work_struct *work)
{
unsigned long flags;
u8 irqs_to_clear;
mutex_lock(&ams_info.lock);
spin_lock_irqsave(&ams_info.irq_lock, flags);
irqs_to_clear = ams_info.worker_irqs;
if (ams_info.worker_irqs & AMS_IRQ_FREEFALL) {
if (verbose)
printk(KERN_INFO "ams: freefall detected!\n");
ams_info.worker_irqs &= ~AMS_IRQ_FREEFALL;
}
if (ams_info.worker_irqs & AMS_IRQ_SHOCK) {
if (verbose)
printk(KERN_INFO "ams: shock detected!\n");
ams_info.worker_irqs &= ~AMS_IRQ_SHOCK;
}
spin_unlock_irqrestore(&ams_info.irq_lock, flags);
ams_info.clear_irq(irqs_to_clear);
mutex_unlock(&ams_info.lock);
}
/* Call with ams_info.lock held! */
/*
 * Common attach path for both I2C and PMU back-ends: read the
 * orientation from the device tree, hook up the freefall/shock PMF
 * interrupts, create the platform device, sysfs attribute and input
 * device.  Uses goto-based unwind so each acquired resource is released
 * in reverse order on failure.
 */
int ams_sensor_attach(void)
{
int result;
const u32 *prop;
/* Get orientation */
prop = of_get_property(ams_info.of_node, "orientation", NULL);
if (!prop)
return -ENODEV;
/* NOTE(review): assumes "orientation" holds at least two cells */
ams_info.orient1 = *prop;
ams_info.orient2 = *(prop + 1);
/* Register freefall interrupt handler */
result = pmf_register_irq_client(ams_info.of_node,
"accel-int-1",
&ams_freefall_client);
if (result < 0)
return -ENODEV;
/* Reset saved irqs */
ams_info.worker_irqs = 0;
/* Register shock interrupt handler */
result = pmf_register_irq_client(ams_info.of_node,
"accel-int-2",
&ams_shock_client);
if (result < 0)
goto release_freefall;
/* Create device */
ams_info.of_dev = of_platform_device_create(ams_info.of_node, "ams", NULL);
if (!ams_info.of_dev) {
result = -ENODEV;
goto release_shock;
}
/* Create attributes */
result = device_create_file(&ams_info.of_dev->dev, &dev_attr_current);
if (result)
goto release_of;
/* bit 4 of the vendor byte selects which orientation word is valid */
ams_info.vflag = !!(ams_info.get_vendor() & 0x10);
/* Init input device */
result = ams_input_init();
if (result)
goto release_device_file;
return result;
release_device_file:
device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
release_of:
of_device_unregister(ams_info.of_dev);
release_shock:
pmf_unregister_irq_client(&ams_shock_client);
release_freefall:
pmf_unregister_irq_client(&ams_freefall_client);
return result;
}
/*
 * Module entry: initialise the shared state, then probe the device tree
 * for whichever back-end (I2C or PMU) is present.
 */
int __init ams_init(void)
{
struct device_node *np;
spin_lock_init(&ams_info.irq_lock);
mutex_init(&ams_info.lock);
INIT_WORK(&ams_info.worker, ams_worker);
#ifdef CONFIG_SENSORS_AMS_I2C
np = of_find_node_by_name(NULL, "accelerometer");
if (np && of_device_is_compatible(np, "AAPL,accelerometer_1"))
/* Found I2C motion sensor */
return ams_i2c_init(np);
#endif
#ifdef CONFIG_SENSORS_AMS_PMU
np = of_find_node_by_name(NULL, "sms");
if (np && of_device_is_compatible(np, "sms"))
/* Found PMU motion sensor */
return ams_pmu_init(np);
#endif
return -ENODEV;
}
/* Tear down everything ams_sensor_attach() created, in reverse order. */
void ams_sensor_detach(void)
{
/* Remove input device */
ams_input_exit();
/* Remove attributes */
device_remove_file(&ams_info.of_dev->dev, &dev_attr_current);
/* Flush interrupt worker
 *
 * We do this after ams_info.exit(), because an interrupt might
 * have arrived before disabling them.
 */
flush_work(&ams_info.worker);
/* Remove device */
of_device_unregister(ams_info.of_dev);
/* Remove handler */
pmf_unregister_irq_client(&ams_shock_client);
pmf_unregister_irq_client(&ams_freefall_client);
}
/* Module exit: delegate to the back-end's exit hook (set at init time). */
static void __exit ams_exit(void)
{
/* Shut down implementation */
ams_info.exit();
}
MODULE_AUTHOR("Stelian Pop, Michael Hanselmann");
MODULE_DESCRIPTION("Apple Motion Sensor driver");
MODULE_LICENSE("GPL");
module_init(ams_init);
module_exit(ams_exit);
| gpl-2.0 |
mathkid95/linux_lg_lollipop | drivers/staging/iio/adc/adt7310.c | 4878 | 20917 | /*
* ADT7310 digital temperature sensor driver supporting ADT7310
*
* Copyright 2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../events.h"
/*
* ADT7310 registers definition
*/
#define ADT7310_STATUS 0
#define ADT7310_CONFIG 1
#define ADT7310_TEMPERATURE 2
#define ADT7310_ID 3
#define ADT7310_T_CRIT 4
#define ADT7310_T_HYST 5
#define ADT7310_T_ALARM_HIGH 6
#define ADT7310_T_ALARM_LOW 7
/*
* ADT7310 status
*/
#define ADT7310_STAT_T_LOW 0x10
#define ADT7310_STAT_T_HIGH 0x20
#define ADT7310_STAT_T_CRIT 0x40
#define ADT7310_STAT_NOT_RDY 0x80
/*
* ADT7310 config
*/
#define ADT7310_FAULT_QUEUE_MASK 0x3
#define ADT7310_CT_POLARITY 0x4
#define ADT7310_INT_POLARITY 0x8
#define ADT7310_EVENT_MODE 0x10
#define ADT7310_MODE_MASK 0x60
#define ADT7310_ONESHOT 0x20
#define ADT7310_SPS 0x40
#define ADT7310_PD 0x60
#define ADT7310_RESOLUTION 0x80
/*
* ADT7310 masks
*/
#define ADT7310_T16_VALUE_SIGN 0x8000
#define ADT7310_T16_VALUE_FLOAT_OFFSET 7
#define ADT7310_T16_VALUE_FLOAT_MASK 0x7F
#define ADT7310_T13_VALUE_SIGN 0x1000
#define ADT7310_T13_VALUE_OFFSET 3
#define ADT7310_T13_VALUE_FLOAT_OFFSET 4
#define ADT7310_T13_VALUE_FLOAT_MASK 0xF
#define ADT7310_T_HYST_MASK 0xF
#define ADT7310_DEVICE_ID_MASK 0x7
#define ADT7310_MANUFACTORY_ID_MASK 0xF8
#define ADT7310_MANUFACTORY_ID_OFFSET 3
#define ADT7310_CMD_REG_MASK 0x28
#define ADT7310_CMD_REG_OFFSET 3
#define ADT7310_CMD_READ 0x40
#define ADT7310_CMD_CON_READ 0x4
#define ADT7310_IRQS 2
/*
* struct adt7310_chip_info - chip specific information
*/
struct adt7310_chip_info {
struct spi_device *spi_dev;
/* cached copy of the ADT7310_CONFIG register */
u8 config;
};
/*
* adt7310 register access by SPI
*/
/*
 * Read a big-endian 16-bit register: send the read command, then fetch
 * the value and convert to host order.  Returns 0 or a negative errno.
 * NOTE(review): command and data go out as two separate SPI messages —
 * presumably chip-select stays asserted between them on this setup;
 * verify against the board's SPI controller behaviour.
 */
static int adt7310_spi_read_word(struct adt7310_chip_info *chip, u8 reg, u16 *data)
{
struct spi_device *spi_dev = chip->spi_dev;
u8 command = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
int ret = 0;
command |= ADT7310_CMD_READ;
ret = spi_write(spi_dev, &command, sizeof(command));
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI write command error\n");
return ret;
}
ret = spi_read(spi_dev, (u8 *)data, sizeof(*data));
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI read word error\n");
return ret;
}
*data = be16_to_cpu(*data);
return 0;
}
/*
 * Write a 16-bit register: command byte followed by the value,
 * most-significant byte first.  Returns 0 or a negative errno.
 */
static int adt7310_spi_write_word(struct adt7310_chip_info *chip, u8 reg, u16 data)
{
struct spi_device *spi_dev = chip->spi_dev;
u8 buf[3];
int ret = 0;
buf[0] = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
buf[1] = (u8)(data >> 8);
buf[2] = (u8)(data & 0xFF);
ret = spi_write(spi_dev, buf, 3);
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI write word error\n");
return ret;
}
return ret;
}
/* Read an 8-bit register; same two-message pattern as the word read. */
static int adt7310_spi_read_byte(struct adt7310_chip_info *chip, u8 reg, u8 *data)
{
struct spi_device *spi_dev = chip->spi_dev;
u8 command = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
int ret = 0;
command |= ADT7310_CMD_READ;
ret = spi_write(spi_dev, &command, sizeof(command));
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI write command error\n");
return ret;
}
ret = spi_read(spi_dev, data, sizeof(*data));
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI read byte error\n");
return ret;
}
return 0;
}
/* Write an 8-bit register: command byte followed by the value. */
static int adt7310_spi_write_byte(struct adt7310_chip_info *chip, u8 reg, u8 data)
{
struct spi_device *spi_dev = chip->spi_dev;
u8 buf[2];
int ret = 0;
buf[0] = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
buf[1] = data;
ret = spi_write(spi_dev, buf, 2);
if (ret < 0) {
dev_err(&spi_dev->dev, "SPI write byte error\n");
return ret;
}
return ret;
}
/* sysfs "mode" show: report the cached operation mode by name. */
static ssize_t adt7310_show_mode(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7310_chip_info *chip = iio_priv(dev_info);
	const char *mode;

	/* Map the cached configuration bits onto a mode name. */
	switch (chip->config & ADT7310_MODE_MASK) {
	case ADT7310_PD:
		mode = "power-down";
		break;
	case ADT7310_ONESHOT:
		mode = "one-shot";
		break;
	case ADT7310_SPS:
		mode = "sps";
		break;
	default:
		mode = "full";
		break;
	}
	return sprintf(buf, "%s\n", mode);
}
/*
 * sysfs "mode" store: select the conversion mode.  Accepts one of
 * "power-down", "one-shot" or "sps"; any other input selects full
 * (continuous) mode.  Returns @len on success, -EIO on bus error.
 */
static ssize_t adt7310_store_mode(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7310_chip_info *chip = iio_priv(dev_info);
	u16 config;
	int ret;

	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
	if (ret)
		return -EIO;

	config = chip->config & (~ADT7310_MODE_MASK);
	/*
	 * sysfs_streq() ignores the trailing newline sysfs hands us.  The
	 * original strcmp() tests were inverted — strcmp() returns 0 on a
	 * match, so "if (strcmp(buf, ...))" matched every string EXCEPT the
	 * one it named and the wrong mode bits were always programmed.
	 */
	if (sysfs_streq(buf, "power-down"))
		config |= ADT7310_PD;
	else if (sysfs_streq(buf, "one-shot"))
		config |= ADT7310_ONESHOT;
	else if (sysfs_streq(buf, "sps"))
		config |= ADT7310_SPS;

	ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
	if (ret)
		return -EIO;

	/* only update the cache once the chip accepted the new value */
	chip->config = config;

	return len;
}

static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
		adt7310_show_mode,
		adt7310_store_mode,
		0);
/* sysfs "available_modes": fixed list understood by adt7310_store_mode(). */
static ssize_t adt7310_show_available_modes(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	static const char * const modes = "full\none-shot\nsps\npower-down\n";

	return sprintf(buf, "%s", modes);
}

static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt7310_show_available_modes, NULL, 0);
/* sysfs "resolution" show: 16-bit if the RESOLUTION bit is set, else 13. */
static ssize_t adt7310_show_resolution(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
int bits;
ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
if (ret)
return -EIO;
if (chip->config & ADT7310_RESOLUTION)
bits = 16;
else
bits = 13;
return sprintf(buf, "%d bits\n", bits);
}
/* sysfs "resolution" store: any non-zero value selects 16-bit mode. */
static ssize_t adt7310_store_resolution(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
unsigned long data;
u16 config;
int ret;
ret = strict_strtoul(buf, 10, &data);
if (ret)
return -EINVAL;
ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
if (ret)
return -EIO;
config = chip->config & (~ADT7310_RESOLUTION);
if (data)
config |= ADT7310_RESOLUTION;
ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
if (ret)
return -EIO;
chip->config = config;
return len;
}
static IIO_DEVICE_ATTR(resolution, S_IRUGO | S_IWUSR,
adt7310_show_resolution,
adt7310_store_resolution,
0);
static ssize_t adt7310_show_id(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
u8 id;
int ret;
ret = adt7310_spi_read_byte(chip, ADT7310_ID, &id);
if (ret)
return -EIO;
return sprintf(buf, "device id: 0x%x\nmanufactory id: 0x%x\n",
id & ADT7310_DEVICE_ID_MASK,
(id & ADT7310_MANUFACTORY_ID_MASK) >> ADT7310_MANUFACTORY_ID_OFFSET);
}
static IIO_DEVICE_ATTR(id, S_IRUGO | S_IWUSR,
adt7310_show_id,
NULL,
0);
/*
 * Format a raw temperature word as a signed decimal string.
 * 16-bit mode: two's complement, 1/128 degC per LSB (fraction * 78125,
 * printed to 7 digits).  13-bit mode: value sits in the top 13 bits,
 * 1/16 degC per LSB (fraction * 625, printed to 4 digits).  Negative
 * values are converted back to magnitude before printing.
 */
static ssize_t adt7310_convert_temperature(struct adt7310_chip_info *chip,
u16 data, char *buf)
{
char sign = ' ';
if (chip->config & ADT7310_RESOLUTION) {
if (data & ADT7310_T16_VALUE_SIGN) {
/* convert supplement to positive value */
data = (u16)((ADT7310_T16_VALUE_SIGN << 1) - (u32)data);
sign = '-';
}
return sprintf(buf, "%c%d.%.7d\n", sign,
(data >> ADT7310_T16_VALUE_FLOAT_OFFSET),
(data & ADT7310_T16_VALUE_FLOAT_MASK) * 78125);
} else {
if (data & ADT7310_T13_VALUE_SIGN) {
/* convert supplement to positive value */
data >>= ADT7310_T13_VALUE_OFFSET;
data = (ADT7310_T13_VALUE_SIGN << 1) - data;
sign = '-';
}
return sprintf(buf, "%c%d.%.4d\n", sign,
(data >> ADT7310_T13_VALUE_FLOAT_OFFSET),
(data & ADT7310_T13_VALUE_FLOAT_MASK) * 625);
}
}
/*
 * sysfs "value": busy-wait (bounded to 10000 polls) until the RDY bit
 * clears, then read and format the temperature register.
 */
static ssize_t adt7310_show_value(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
u8 status;
u16 data;
int ret, i = 0;
do {
ret = adt7310_spi_read_byte(chip, ADT7310_STATUS, &status);
if (ret)
return -EIO;
i++;
/* give up rather than spin forever if the chip never becomes ready */
if (i == 10000)
return -EIO;
} while (status & ADT7310_STAT_NOT_RDY);
ret = adt7310_spi_read_word(chip, ADT7310_TEMPERATURE, &data);
if (ret)
return -EIO;
return adt7310_convert_temperature(chip, data, buf);
}
static IIO_DEVICE_ATTR(value, S_IRUGO, adt7310_show_value, NULL, 0);
/* Device-level sysfs attributes registered with the IIO core. */
static struct attribute *adt7310_attributes[] = {
&iio_dev_attr_available_modes.dev_attr.attr,
&iio_dev_attr_mode.dev_attr.attr,
&iio_dev_attr_resolution.dev_attr.attr,
&iio_dev_attr_id.dev_attr.attr,
&iio_dev_attr_value.dev_attr.attr,
NULL,
};
static const struct attribute_group adt7310_attribute_group = {
.attrs = adt7310_attributes,
};
/*
 * Threaded IRQ handler: read the status register and push an IIO event
 * for each alarm flag (high/low/critical threshold) that is set.
 */
static irqreturn_t adt7310_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct adt7310_chip_info *chip = iio_priv(indio_dev);
	s64 timestamp = iio_get_time_ns();
	u8 status;
	int ret;

	ret = adt7310_spi_read_byte(chip, ADT7310_STATUS, &status);
	if (ret)
		/*
		 * An IRQ handler must return an irqreturn_t; the original
		 * returned the negative errno, which is not a valid value.
		 * Report the interrupt as handled and drop the event on a
		 * bus error.
		 */
		return IRQ_HANDLED;

	if (status & ADT7310_STAT_T_HIGH)
		iio_push_event(indio_dev,
			       IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
						    IIO_EV_TYPE_THRESH,
						    IIO_EV_DIR_RISING),
			       timestamp);
	if (status & ADT7310_STAT_T_LOW)
		iio_push_event(indio_dev,
			       IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
						    IIO_EV_TYPE_THRESH,
						    IIO_EV_DIR_FALLING),
			       timestamp);
	if (status & ADT7310_STAT_T_CRIT)
		iio_push_event(indio_dev,
			       IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
						    IIO_EV_TYPE_THRESH,
						    IIO_EV_DIR_RISING),
			       timestamp);

	return IRQ_HANDLED;
}
/* Event-mode show: "interrupt" if the EVENT_MODE bit is set, else "comparator". */
static ssize_t adt7310_show_event_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
if (ret)
return -EIO;
if (chip->config & ADT7310_EVENT_MODE)
return sprintf(buf, "interrupt\n");
else
return sprintf(buf, "comparator\n");
}
/*
 * Event-mode store: "comparator" selects comparator mode, anything else
 * selects interrupt mode.  Returns @len on success, -EIO on bus error.
 */
static ssize_t adt7310_set_event_mode(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7310_chip_info *chip = iio_priv(dev_info);
	u16 config;
	int ret;

	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
	if (ret)
		return -EIO;

	/*
	 * Compute the new value without touching the cache: the original
	 * "config = chip->config &= ~ADT7310_EVENT_MODE" clobbered the
	 * cached config even when the SPI write below failed.
	 */
	config = chip->config & ~ADT7310_EVENT_MODE;
	/*
	 * sysfs_streq() ignores the trailing newline that sysfs appends,
	 * which made the original strcmp() never match "comparator".
	 */
	if (!sysfs_streq(buf, "comparator"))
		config |= ADT7310_EVENT_MODE;

	ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
	if (ret)
		return -EIO;

	chip->config = config;

	return len;
}
/* List of strings accepted by adt7310_set_event_mode(). */
static ssize_t adt7310_show_available_event_modes(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "comparator\ninterrupt\n");
}
/* Fault-queue show: number of consecutive faults required (bits 0-1). */
static ssize_t adt7310_show_fault_queue(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
if (ret)
return -EIO;
return sprintf(buf, "%d\n", chip->config & ADT7310_FAULT_QUEUE_MASK);
}
/* Fault-queue store: accepts 0..3; larger values are rejected. */
static ssize_t adt7310_set_fault_queue(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
u8 config;
ret = strict_strtoul(buf, 10, &data);
if (ret || data > 3)
return -EINVAL;
ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
if (ret)
return -EIO;
config = chip->config & ~ADT7310_FAULT_QUEUE_MASK;
config |= data;
ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
if (ret)
return -EIO;
chip->config = config;
return len;
}
/* Read and format one of the threshold registers (@bound_reg). */
static inline ssize_t adt7310_show_t_bound(struct device *dev,
struct device_attribute *attr,
u8 bound_reg,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct adt7310_chip_info *chip = iio_priv(dev_info);
u16 data;
int ret;
ret = adt7310_spi_read_word(chip, bound_reg, &data);
if (ret)
return -EIO;
return adt7310_convert_temperature(chip, data, buf);
}
/*
 * Parse a decimal temperature ("[-]int[.frac]"), convert it to the
 * chip's 16- or 13-bit two's-complement format and write it to the
 * threshold register @bound_reg.  The fraction is quantised to the
 * register's step (1/128 degC in 16-bit mode, 1/16 degC in 13-bit mode).
 */
static inline ssize_t adt7310_set_t_bound(struct device *dev,
					  struct device_attribute *attr,
					  u8 bound_reg,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7310_chip_info *chip = iio_priv(dev_info);
	/*
	 * tmp2 must start at 0: when the input has no '.' the fractional
	 * parsing below is skipped entirely, and the original code then
	 * read tmp2 uninitialized (undefined behavior).
	 */
	long tmp1, tmp2 = 0;
	u16 data;
	char *pos;
	int ret;

	pos = strchr(buf, '.');
	ret = strict_strtol(buf, 10, &tmp1);
	/* integer part must fit the 8-bit signed range of the chip */
	if (ret || tmp1 > 127 || tmp1 < -128)
		return -EINVAL;

	if (pos) {
		len = strlen(pos);
		if (chip->config & ADT7310_RESOLUTION) {
			if (len > ADT7310_T16_VALUE_FLOAT_OFFSET)
				len = ADT7310_T16_VALUE_FLOAT_OFFSET;
			pos[len] = 0;
			ret = strict_strtol(pos, 10, &tmp2);
			if (!ret)
				tmp2 = (tmp2 / 78125) * 78125;
		} else {
			if (len > ADT7310_T13_VALUE_FLOAT_OFFSET)
				len = ADT7310_T13_VALUE_FLOAT_OFFSET;
			pos[len] = 0;
			ret = strict_strtol(pos, 10, &tmp2);
			if (!ret)
				tmp2 = (tmp2 / 625) * 625;
		}
	}

	if (tmp1 < 0)
		data = (u16)(-tmp1);
	else
		data = (u16)tmp1;

	if (chip->config & ADT7310_RESOLUTION) {
		data = (data << ADT7310_T16_VALUE_FLOAT_OFFSET) |
			(tmp2 & ADT7310_T16_VALUE_FLOAT_MASK);
		if (tmp1 < 0)
			/* convert magnitude to two's complement */
			data = (u16)((ADT7310_T16_VALUE_SIGN << 1) - (u32)data);
	} else {
		data = (data << ADT7310_T13_VALUE_FLOAT_OFFSET) |
			(tmp2 & ADT7310_T13_VALUE_FLOAT_MASK);
		if (tmp1 < 0)
			/* convert magnitude to two's complement */
			data = (ADT7310_T13_VALUE_SIGN << 1) - data;
		data <<= ADT7310_T13_VALUE_OFFSET;
	}

	ret = adt7310_spi_write_word(chip, bound_reg, data);
	if (ret)
		return -EIO;

	return len;
}
/* sysfs "show" for the THIGH (over-temperature alarm) bound register. */
static ssize_t adt7310_show_t_alarm_high(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return adt7310_show_t_bound(dev, attr,
			ADT7310_T_ALARM_HIGH, buf);
}

/* sysfs "store" for the THIGH (over-temperature alarm) bound register. */
static inline ssize_t adt7310_set_t_alarm_high(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	return adt7310_set_t_bound(dev, attr,
			ADT7310_T_ALARM_HIGH, buf, len);
}

/* sysfs "show" for the TLOW (under-temperature alarm) bound register. */
static ssize_t adt7310_show_t_alarm_low(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return adt7310_show_t_bound(dev, attr,
			ADT7310_T_ALARM_LOW, buf);
}

/* sysfs "store" for the TLOW (under-temperature alarm) bound register. */
static inline ssize_t adt7310_set_t_alarm_low(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	return adt7310_set_t_bound(dev, attr,
			ADT7310_T_ALARM_LOW, buf, len);
}

/* sysfs "show" for the TCRIT (critical over-temperature) bound register. */
static ssize_t adt7310_show_t_crit(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return adt7310_show_t_bound(dev, attr,
			ADT7310_T_CRIT, buf);
}

/* sysfs "store" for the TCRIT (critical over-temperature) bound register. */
static inline ssize_t adt7310_set_t_crit(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	return adt7310_set_t_bound(dev, attr,
			ADT7310_T_CRIT, buf, len);
}
/*
 * Report the hysteresis register, masked to its valid field
 * (ADT7310_T_HYST_MASK).  Returns -EIO on SPI error.
 */
static ssize_t adt7310_show_t_hyst(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7310_chip_info *chip = iio_priv(dev_info);
	int ret;
	u8 t_hyst;

	ret = adt7310_spi_read_byte(chip, ADT7310_T_HYST, &t_hyst);
	if (ret)
		return -EIO;

	return sprintf(buf, "%d\n", t_hyst & ADT7310_T_HYST_MASK);
}
/*
 * Set the hysteresis register.  Accepted range is 0..ADT7310_T_HYST_MASK.
 * Returns @len on success, -EINVAL for bad input, -EIO on SPI error.
 *
 * Fix: the value was parsed with strict_strtol() into an unsigned long,
 * i.e. an unsigned long * was passed where a long * is expected (an
 * incompatible-pointer bug), and negative input could slip through the
 * range check.  Parse unsigned with strict_strtoul() instead.
 */
static inline ssize_t adt7310_set_t_hyst(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7310_chip_info *chip = iio_priv(dev_info);
	int ret;
	unsigned long data;
	u8 t_hyst;

	ret = strict_strtoul(buf, 10, &data);
	if (ret || data > ADT7310_T_HYST_MASK)
		return -EINVAL;

	t_hyst = (u8)data;

	ret = adt7310_spi_write_byte(chip, ADT7310_T_HYST, t_hyst);
	if (ret)
		return -EIO;

	return len;
}
/* Event/alarm sysfs attributes, exposed under the "events" group below. */
static IIO_DEVICE_ATTR(event_mode,
		       S_IRUGO | S_IWUSR,
		       adt7310_show_event_mode, adt7310_set_event_mode, 0);
/*
 * NOTE(review): declared S_IWUSR but the store callback is NULL; the
 * write permission looks unintended -- confirm.
 */
static IIO_DEVICE_ATTR(available_event_modes,
		       S_IRUGO | S_IWUSR,
		       adt7310_show_available_event_modes, NULL, 0);
static IIO_DEVICE_ATTR(fault_queue,
		       S_IRUGO | S_IWUSR,
		       adt7310_show_fault_queue, adt7310_set_fault_queue, 0);
static IIO_DEVICE_ATTR(t_alarm_high,
		       S_IRUGO | S_IWUSR,
		       adt7310_show_t_alarm_high, adt7310_set_t_alarm_high, 0);
static IIO_DEVICE_ATTR(t_alarm_low,
		       S_IRUGO | S_IWUSR,
		       adt7310_show_t_alarm_low, adt7310_set_t_alarm_low, 0);
static IIO_DEVICE_ATTR(t_crit,
		       S_IRUGO | S_IWUSR,
		       adt7310_show_t_crit, adt7310_set_t_crit, 0);
static IIO_DEVICE_ATTR(t_hyst,
		       S_IRUGO | S_IWUSR,
		       adt7310_show_t_hyst, adt7310_set_t_hyst, 0);

/* NULL-terminated attribute list for the "events" sysfs group. */
static struct attribute *adt7310_event_int_attributes[] = {
	&iio_dev_attr_event_mode.dev_attr.attr,
	&iio_dev_attr_available_event_modes.dev_attr.attr,
	&iio_dev_attr_fault_queue.dev_attr.attr,
	&iio_dev_attr_t_alarm_high.dev_attr.attr,
	&iio_dev_attr_t_alarm_low.dev_attr.attr,
	&iio_dev_attr_t_crit.dev_attr.attr,
	&iio_dev_attr_t_hyst.dev_attr.attr,
	NULL,
};

static struct attribute_group adt7310_event_attribute_group = {
	.attrs = adt7310_event_int_attributes,
	.name = "events",
};

/* IIO hooks: device attrs, event attrs, owning module. */
static const struct iio_info adt7310_info = {
	.attrs = &adt7310_attribute_group,
	.event_attrs = &adt7310_event_attribute_group,
	.driver_module = THIS_MODULE,
};
/*
* device probe and remove
*/
/*
 * adt7310_probe() - allocate the IIO device, hook up the CT and INT
 * alarm interrupts described by the platform data, and register with
 * the IIO core.
 *
 * Platform data layout (unsigned long[3]):
 *   [0] INT IRQ number (0 = not used)
 *   [1] INT IRQ trigger flags
 *   [2] CT IRQ trigger flags override (0 = default IRQF_TRIGGER_LOW)
 *
 * Fixes:
 *  - the platform data pointer was dereferenced unconditionally; bail
 *    out with -EINVAL instead of crashing when it is absent.
 *  - the error unwind called free_irq() on IRQs that were never
 *    requested (e.g. free_irq(0, ...) when only the CT line was used);
 *    guard each free with the same condition as its request.
 */
static int __devinit adt7310_probe(struct spi_device *spi_dev)
{
	struct adt7310_chip_info *chip;
	struct iio_dev *indio_dev;
	int ret = 0;
	unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
	unsigned long irq_flags;

	if (!adt7310_platform_data)
		return -EINVAL;

	indio_dev = iio_allocate_device(sizeof(*chip));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	chip = iio_priv(indio_dev);
	/* this is only used for device removal purposes */
	dev_set_drvdata(&spi_dev->dev, indio_dev);

	chip->spi_dev = spi_dev;

	indio_dev->dev.parent = &spi_dev->dev;
	indio_dev->name = spi_get_device_id(spi_dev)->name;
	indio_dev->info = &adt7310_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	/* CT critical temperature event. line 0 */
	if (spi_dev->irq) {
		if (adt7310_platform_data[2])
			irq_flags = adt7310_platform_data[2];
		else
			irq_flags = IRQF_TRIGGER_LOW;
		ret = request_threaded_irq(spi_dev->irq,
					   NULL,
					   &adt7310_event_handler,
					   irq_flags,
					   indio_dev->name,
					   indio_dev);
		if (ret)
			goto error_free_dev;
	}

	/* INT bound temperature alarm event. line 1 */
	if (adt7310_platform_data[0]) {
		ret = request_threaded_irq(adt7310_platform_data[0],
					   NULL,
					   &adt7310_event_handler,
					   adt7310_platform_data[1],
					   indio_dev->name,
					   indio_dev);
		if (ret)
			goto error_unreg_ct_irq;
	}

	if (spi_dev->irq && adt7310_platform_data[0]) {
		ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
		if (ret) {
			ret = -EIO;
			goto error_unreg_int_irq;
		}

		/* set irq polarity low level */
		chip->config &= ~ADT7310_CT_POLARITY;

		if (adt7310_platform_data[1] & IRQF_TRIGGER_HIGH)
			chip->config |= ADT7310_INT_POLARITY;
		else
			chip->config &= ~ADT7310_INT_POLARITY;

		ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, chip->config);
		if (ret) {
			ret = -EIO;
			goto error_unreg_int_irq;
		}
	}

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_unreg_int_irq;

	dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
			 indio_dev->name);

	return 0;

error_unreg_int_irq:
	if (adt7310_platform_data[0])
		free_irq(adt7310_platform_data[0], indio_dev);
error_unreg_ct_irq:
	if (spi_dev->irq)
		free_irq(spi_dev->irq, indio_dev);
error_free_dev:
	iio_free_device(indio_dev);
error_ret:
	return ret;
}
/*
 * adt7310_remove() - unregister from the IIO core, release any IRQs
 * that probe requested, and free the device.
 *
 * Fix: the platform data pointer was dereferenced without a NULL
 * check; probe now refuses devices without platform data, but keep
 * the guard here so the two functions are independently safe.
 */
static int __devexit adt7310_remove(struct spi_device *spi_dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
	unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;

	iio_device_unregister(indio_dev);
	dev_set_drvdata(&spi_dev->dev, NULL);
	if (adt7310_platform_data && adt7310_platform_data[0])
		free_irq(adt7310_platform_data[0], indio_dev);
	if (spi_dev->irq)
		free_irq(spi_dev->irq, indio_dev);
	iio_free_device(indio_dev);

	return 0;
}
/* SPI device ID table: single supported part, no driver_data needed. */
static const struct spi_device_id adt7310_id[] = {
	{ "adt7310", 0 },
	{}
};

MODULE_DEVICE_TABLE(spi, adt7310_id);

static struct spi_driver adt7310_driver = {
	.driver = {
		.name = "adt7310",
		.owner = THIS_MODULE,
	},
	.probe = adt7310_probe,
	.remove = __devexit_p(adt7310_remove),
	.id_table = adt7310_id,
};
module_spi_driver(adt7310_driver);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADT7310 digital"
			" temperature sensor driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
santod/nuk3rn3l_htc_msm8960-revamped | drivers/staging/iio/dac/ad5686.c | 4878 | 11514 | /*
* AD5686R, AD5685R, AD5684R Digital to analog converters driver
*
* Copyright 2011 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
#include "../iio.h"
#include "../sysfs.h"
#include "dac.h"
#define AD5686_DAC_CHANNELS			4

/* Field positions within the 24-bit input shift register word. */
#define AD5686_ADDR(x)				((x) << 16)
#define AD5686_CMD(x)				((x) << 20)

/* Channel address bits: one-hot per DAC; 0xF addresses all four. */
#define AD5686_ADDR_DAC(chan)		(0x1 << (chan))
#define AD5686_ADDR_ALL_DAC			0xF

/* Command codes (bits [23:20] of the shift register word). */
#define AD5686_CMD_NOOP					0x0
#define AD5686_CMD_WRITE_INPUT_N			0x1
#define AD5686_CMD_UPDATE_DAC_N				0x2
#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N		0x3
#define AD5686_CMD_POWERDOWN_DAC			0x4
#define AD5686_CMD_LDAC_MASK				0x5
#define AD5686_CMD_RESET				0x6
#define AD5686_CMD_INTERNAL_REFER_SETUP			0x7
#define AD5686_CMD_DAISY_CHAIN_ENABLE			0x8
#define AD5686_CMD_READBACK_ENABLE			0x9

/* Per-channel power-down modes: two bits per channel. */
#define AD5686_LDAC_PWRDN_NONE				0x0
#define AD5686_LDAC_PWRDN_1K				0x1
#define AD5686_LDAC_PWRDN_100K				0x2
#define AD5686_LDAC_PWRDN_3STATE			0x3

/**
 * struct ad5686_chip_info - chip specific information
 * @int_vref_mv:	AD5620/40/60: the internal reference voltage
 * @channel:		channel specification
 */
struct ad5686_chip_info {
	u16				int_vref_mv;
	struct iio_chan_spec		channel[AD5686_DAC_CHANNELS];
};

/**
 * struct ad5446_state - driver instance specific data
 * @spi:		spi_device
 * @chip_info:		chip model specific constants, available modes etc
 * @reg:		supply regulator
 * @vref_mv:		actual reference voltage used
 * @pwr_down_mask:	power down mask
 * @pwr_down_mode:	current power down mode
 * @data:		spi transfer buffers
 */
struct ad5686_state {
	struct spi_device		*spi;
	const struct ad5686_chip_info	*chip_info;
	struct regulator		*reg;
	unsigned short			vref_mv;
	unsigned			pwr_down_mask;
	unsigned			pwr_down_mode;
	/*
	 * DMA (thus cache coherency maintenance) requires the
	 * transfer buffers to live in their own cache lines.
	 */
	union {
		u32 d32;
		u8 d8[4];
	} data[3] ____cacheline_aligned;
};

/**
 * ad5686_supported_device_ids:
 */
enum ad5686_supported_device_ids {
	ID_AD5684,
	ID_AD5685,
	ID_AD5686,
};

/*
 * Channel template: voltage output, per-channel index, shared scale.
 * NOTE(review): "AD5868" looks like a transposition of "AD5686"; the
 * name is used consistently below, so it is left as-is here.
 */
#define AD5868_CHANNEL(chan, bits, shift) {			\
		.type = IIO_VOLTAGE,				\
		.indexed = 1,					\
		.output = 1,					\
		.channel = chan,				\
		.info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,	\
		.address = AD5686_ADDR_DAC(chan),		\
		.scan_type = IIO_ST('u', bits, 16, shift)	\
}
/*
 * Per-device channel layouts.  All three parts have four channels and
 * a 2500 mV internal reference; they differ only in resolution
 * (12/14/16 bits) and hence in the left-shift of the data word.
 */
static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
	[ID_AD5684] = {
		.channel[0] = AD5868_CHANNEL(0, 12, 4),
		.channel[1] = AD5868_CHANNEL(1, 12, 4),
		.channel[2] = AD5868_CHANNEL(2, 12, 4),
		.channel[3] = AD5868_CHANNEL(3, 12, 4),
		.int_vref_mv = 2500,
	},
	[ID_AD5685] = {
		.channel[0] = AD5868_CHANNEL(0, 14, 2),
		.channel[1] = AD5868_CHANNEL(1, 14, 2),
		.channel[2] = AD5868_CHANNEL(2, 14, 2),
		.channel[3] = AD5868_CHANNEL(3, 14, 2),
		.int_vref_mv = 2500,
	},
	[ID_AD5686] = {
		.channel[0] = AD5868_CHANNEL(0, 16, 0),
		.channel[1] = AD5868_CHANNEL(1, 16, 0),
		.channel[2] = AD5868_CHANNEL(2, 16, 0),
		.channel[3] = AD5868_CHANNEL(3, 16, 0),
		.int_vref_mv = 2500,
	},
};
/*
 * Send one 24-bit command word: CMD in bits [23:20], ADDR in [19:16],
 * the (pre-shifted) data value in [15:0].  The word is staged
 * big-endian in data[0] and only its low three bytes are clocked out.
 */
static int ad5686_spi_write(struct ad5686_state *st,
			    u8 cmd, u8 addr, u16 val, u8 shift)
{
	u16 regval = val << shift;

	st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
				      AD5686_ADDR(addr) |
				      regval);

	return spi_write(st->spi, &st->data[0].d8[1], 3);
}
/*
 * Read back a DAC register.  Two 3-byte transfers in one message: the
 * first sends READBACK_ENABLE for @addr (cs_change latches the
 * command), the second clocks out a NOOP while capturing the reply
 * into data[2].  Returns the be32-decoded reply, or a negative errno
 * from spi_sync().
 */
static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
{
	struct spi_transfer t[] = {
		{
			.tx_buf = &st->data[0].d8[1],
			.len = 3,
			.cs_change = 1,
		}, {
			.tx_buf = &st->data[1].d8[1],
			.rx_buf = &st->data[2].d8[1],
			.len = 3,
		},
	};
	struct spi_message m;
	int ret;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) |
				      AD5686_ADDR(addr));
	st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));

	ret = spi_sync(st->spi, &m);
	if (ret < 0)
		return ret;

	return be32_to_cpu(st->data[2].d32);
}
/*
 * Show the power-down mode of one channel.  Each channel owns a
 * two-bit field in st->pwr_down_mode; the attribute's address selects
 * which channel's field is decoded.
 */
static ssize_t ad5686_read_powerdown_mode(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned field = (st->pwr_down_mode >> (this_attr->address * 2)) & 0x3;
	static const char * const mode_names[] = {
		"", "1kohm_to_gnd", "100kohm_to_gnd", "three_state"
	};

	return sprintf(buf, "%s\n", mode_names[field]);
}
/*
 * Store a new power-down mode for one channel.  Only the cached
 * st->pwr_down_mode is updated here; the hardware register is written
 * when the channel's powerdown attribute is toggled.
 */
static ssize_t ad5686_write_powerdown_mode(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned shift = this_attr->address * 2;
	unsigned mode;

	if (sysfs_streq(buf, "1kohm_to_gnd"))
		mode = AD5686_LDAC_PWRDN_1K;
	else if (sysfs_streq(buf, "100kohm_to_gnd"))
		mode = AD5686_LDAC_PWRDN_100K;
	else if (sysfs_streq(buf, "three_state"))
		mode = AD5686_LDAC_PWRDN_3STATE;
	else
		return -EINVAL;

	/* Replace just this channel's two-bit field. */
	st->pwr_down_mode = (st->pwr_down_mode & ~(0x3 << shift)) |
			    (mode << shift);

	return len;
}
/*
 * Show whether one channel is powered down: prints 1 if either bit of
 * the channel's field in st->pwr_down_mask is set, else 0.
 */
static ssize_t ad5686_read_dac_powerdown(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned shift = this_attr->address * 2;
	int powered_down = (st->pwr_down_mask & (0x3 << shift)) != 0;

	return sprintf(buf, "%d\n", powered_down);
}
/*
 * Power one channel up or down.  Sets/clears the channel's two-bit
 * field in st->pwr_down_mask and pushes mask & mode to the chip with a
 * POWERDOWN_DAC command.  Returns @len on success, or the error from
 * strtobool()/ad5686_spi_write().
 *
 * Cleanup: replaced the non-idiomatic "readin == true" comparison with
 * a plain boolean test (no behavior change).
 */
static ssize_t ad5686_write_dac_powerdown(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	bool readin;
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &readin);
	if (ret)
		return ret;

	if (readin)
		st->pwr_down_mask |= (0x3 << (this_attr->address * 2));
	else
		st->pwr_down_mask &= ~(0x3 << (this_attr->address * 2));

	ret = ad5686_spi_write(st, AD5686_CMD_POWERDOWN_DAC, 0,
			       st->pwr_down_mask & st->pwr_down_mode, 0);

	return ret ? ret : len;
}
/* Constant list of valid power-down mode strings. */
static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
			"1kohm_to_gnd 100kohm_to_gnd three_state");

/* Per-channel out_voltageN_powerdown_mode attribute; _num is the channel. */
#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_num)				\
	IIO_DEVICE_ATTR(out_voltage##_num##_powerdown_mode,		\
			S_IRUGO | S_IWUSR,				\
			ad5686_read_powerdown_mode,			\
			ad5686_write_powerdown_mode, _num)

static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(0);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);

/* Per-channel out_voltageN_powerdown attribute; _num is the channel. */
#define IIO_DEV_ATTR_DAC_POWERDOWN(_num)				\
	IIO_DEVICE_ATTR(out_voltage##_num##_powerdown,			\
			S_IRUGO | S_IWUSR,				\
			ad5686_read_dac_powerdown,			\
			ad5686_write_dac_powerdown, _num)

static IIO_DEV_ATTR_DAC_POWERDOWN(0);
static IIO_DEV_ATTR_DAC_POWERDOWN(1);
static IIO_DEV_ATTR_DAC_POWERDOWN(2);
static IIO_DEV_ATTR_DAC_POWERDOWN(3);

/* NULL-terminated attribute list for the device's sysfs group. */
static struct attribute *ad5686_attributes[] = {
	&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage0_powerdown_mode.dev_attr.attr,
	&iio_dev_attr_out_voltage1_powerdown_mode.dev_attr.attr,
	&iio_dev_attr_out_voltage2_powerdown_mode.dev_attr.attr,
	&iio_dev_attr_out_voltage3_powerdown_mode.dev_attr.attr,
	&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad5686_attribute_group = {
	.attrs = ad5686_attributes,
};
/*
 * ad5686_read_raw() - IIO read callback.
 *
 * m == 0: raw readback of the channel's DAC register over SPI (the
 * device lock serializes the transfer).
 * m == IIO_CHAN_INFO_SCALE: scale = vref_mv / 2^realbits, reported as
 * integer + micro parts.
 *
 * Cleanup: dropped the unreachable "break" that followed
 * "return IIO_VAL_INT" (dead code; no behavior change).
 */
static int ad5686_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	struct ad5686_state *st = iio_priv(indio_dev);
	unsigned long scale_uv;
	int ret;

	switch (m) {
	case 0:
		mutex_lock(&indio_dev->mlock);
		ret = ad5686_spi_read(st, chan->address);
		mutex_unlock(&indio_dev->mlock);
		if (ret < 0)
			return ret;
		*val = ret;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		scale_uv = (st->vref_mv * 100000)
			>> (chan->scan_type.realbits);
		*val =  scale_uv / 100000;
		*val2 = (scale_uv % 100000) * 10;
		return IIO_VAL_INT_PLUS_MICRO;
	}
	return -EINVAL;
}
/*
 * ad5686_write_raw() - IIO write callback: program one DAC channel
 * (write input register and update the output in one command).
 *
 * Fix: valid codes are 0 .. 2^realbits - 1, but the old range check
 * used '>' and therefore accepted val == 1 << realbits; for a 16-bit
 * part that value truncates to 0 through ad5686_spi_write()'s u16
 * argument, silently writing the wrong code.  Reject it with '>='.
 */
static int ad5686_write_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int val,
			    int val2,
			    long mask)
{
	struct ad5686_state *st = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case 0:
		if (val >= (1 << chan->scan_type.realbits) || val < 0)
			return -EINVAL;

		mutex_lock(&indio_dev->mlock);
		ret = ad5686_spi_write(st,
				       AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
				       chan->address,
				       val,
				       chan->scan_type.shift);
		mutex_unlock(&indio_dev->mlock);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/* IIO hooks: raw/scale read, raw write, power-down sysfs attributes. */
static const struct iio_info ad5686_info = {
	.read_raw = ad5686_read_raw,
	.write_raw = ad5686_write_raw,
	.attrs = &ad5686_attribute_group,
	.driver_module = THIS_MODULE,
};
/*
 * ad5686_probe() - allocate the IIO device, pick the reference voltage
 * (external "vcc" regulator if present, otherwise the chip's internal
 * reference), configure the internal-reference setup register, and
 * register with the IIO core.
 *
 * Cleanup: removed the local "regdone", which was set but never read
 * (no behavior change).
 */
static int __devinit ad5686_probe(struct spi_device *spi)
{
	struct ad5686_state *st;
	struct iio_dev *indio_dev;
	int ret, voltage_uv = 0;

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL)
		return  -ENOMEM;

	st = iio_priv(indio_dev);
	spi_set_drvdata(spi, indio_dev);

	/* The external reference is optional. */
	st->reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			goto error_put_reg;

		voltage_uv = regulator_get_voltage(st->reg);
	}

	st->chip_info =
		&ad5686_chip_info_tbl[spi_get_device_id(spi)->driver_data];

	if (voltage_uv)
		st->vref_mv = voltage_uv / 1000;
	else
		st->vref_mv = st->chip_info->int_vref_mv;

	st->spi = spi;

	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &ad5686_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = st->chip_info->channel;
	indio_dev->num_channels = AD5686_DAC_CHANNELS;

	/* Configure the internal reference based on the external supply. */
	ret = ad5686_spi_write(st, AD5686_CMD_INTERNAL_REFER_SETUP, 0,
				!!voltage_uv, 0);
	if (ret)
		goto error_disable_reg;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_disable_reg;

	return 0;

error_disable_reg:
	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);
error_put_reg:
	if (!IS_ERR(st->reg))
		regulator_put(st->reg);
	iio_free_device(indio_dev);
	return ret;
}
/*
 * ad5686_remove() - tear down in reverse probe order: unregister from
 * the IIO core, release the optional regulator, free the device.
 */
static int __devexit ad5686_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad5686_state *st = iio_priv(indio_dev);
	bool have_reg = !IS_ERR(st->reg);

	iio_device_unregister(indio_dev);
	if (have_reg) {
		regulator_disable(st->reg);
		regulator_put(st->reg);
	}
	iio_free_device(indio_dev);

	return 0;
}
/* SPI device IDs; driver_data indexes ad5686_chip_info_tbl. */
static const struct spi_device_id ad5686_id[] = {
	{"ad5684", ID_AD5684},
	{"ad5685", ID_AD5685},
	{"ad5686", ID_AD5686},
	{}
};
MODULE_DEVICE_TABLE(spi, ad5686_id);

static struct spi_driver ad5686_driver = {
	.driver = {
		   .name = "ad5686",
		   .owner = THIS_MODULE,
		   },
	.probe = ad5686_probe,
	.remove = __devexit_p(ad5686_remove),
	.id_table = ad5686_id,
};
module_spi_driver(ad5686_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
shskyinfo/android_kernel_lge_e610 | drivers/scsi/aic7xxx/aic79xx_osm.c | 5134 | 79980 | /*
* Adaptec AIC79xx device driver for Linux.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $
*
* --------------------------------------------------------------------------
* Copyright (c) 1994-2000 Justin T. Gibbs.
* Copyright (c) 1997-1999 Doug Ledford
* Copyright (c) 2000-2003 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include <scsi/scsicam.h>
static struct scsi_transport_template *ahd_linux_transport_template = NULL;
#include <linux/init.h> /* __setup */
#include <linux/mm.h> /* For fetching system memory size */
#include <linux/blkdev.h> /* For block_size() */
#include <linux/delay.h> /* For ssleep/msleep */
#include <linux/device.h>
#include <linux/slab.h>
/*
* Bucket size for counting good commands in between bad ones.
*/
#define AHD_LINUX_ERR_THRESH 1000
/*
* Set this to the delay in seconds after SCSI bus reset.
* Note, we honor this only for the initial bus reset.
* The scsi error recovery code performs its own bus settle
* delay handling for error recovery actions.
*/
#ifdef CONFIG_AIC79XX_RESET_DELAY_MS
#define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS
#else
#define AIC79XX_RESET_DELAY 5000
#endif
/*
* To change the default number of tagged transactions allowed per-device,
* add a line to the lilo.conf file like:
* append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
* which will result in the first four devices on the first two
* controllers being set to a tagged queue depth of 32.
*
* The tag_commands is an array of 16 to allow for wide and twin adapters.
* Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
* for channel 1.
*/
/* Per-controller table of per-target tag depths (see comment above). */
typedef struct {
	uint16_t tag_commands[16];	/* Allow for wide/twin adapters. */
} adapter_tag_info_t;
/*
* Modify this as you see fit for your system.
*
* 0 tagged queuing disabled
* 1 <= n <= 253 n == max tags ever dispatched.
*
* The driver will throttle the number of commands dispatched to a
* device if it returns queue full. For devices with a fixed maximum
* queue depth, the driver will eventually determine this depth and
* lock it in (a console message is printed to indicate that a lock
* has occurred). On some devices, queue full is returned for a temporary
* resource shortage. These devices will return queue full at varying
* depths. The driver will throttle back when the queue fulls occur and
* attempt to slowly increase the depth over time as the device recovers
* from the resource shortage.
*
* In this example, the first line will disable tagged queueing for all
* the devices on the first probed aic79xx adapter.
*
* The second line enables tagged queueing with 4 commands/LUN for IDs
* (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
* driver to attempt to use up to 64 tags for ID 1.
*
* The third line is the same as the first line.
*
* The fourth line disables tagged queueing for devices 0 and 3. It
* enables tagged queueing for the other IDs, with 16 commands/LUN
* for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
* IDs 2, 5-7, and 9-15.
*/
/*
* NOTE: The below structure is for reference only, the actual structure
* to modify in order to change things is just below this comment block.
adapter_tag_info_t aic79xx_tag_info[] =
{
{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
};
*/
#ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE
#define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE
#else
#define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE
#endif
#define AIC79XX_CONFIGED_TAG_COMMANDS { \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE, \
AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE \
}
/*
* By default, use the number of commands specified by
* the users kernel configuration.
*/
/*
 * One entry per probed controller, all initialized to the
 * kernel-configured per-device command count (see the reference
 * structure in the comment block above for the override format).
 */
static adapter_tag_info_t aic79xx_tag_info[] =
{
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS},
	{AIC79XX_CONFIGED_TAG_COMMANDS}
};
/*
* The I/O cell on the chip is very configurable in respect to its analog
* characteristics. Set the defaults here; they can be overriden with
* the proper insmod parameters.
*/
/* Analog I/O-cell tuning knobs; 0xFF means "leave the chip default". */
struct ahd_linux_iocell_opts
{
	uint8_t	precomp;
	uint8_t	slewrate;
	uint8_t amplitude;
};
#define AIC79XX_DEFAULT_PRECOMP		0xFF
#define AIC79XX_DEFAULT_SLEWRATE	0xFF
#define AIC79XX_DEFAULT_AMPLITUDE	0xFF
#define AIC79XX_DEFAULT_IOOPTS			\
{						\
	AIC79XX_DEFAULT_PRECOMP,		\
	AIC79XX_DEFAULT_SLEWRATE,		\
	AIC79XX_DEFAULT_AMPLITUDE		\
}
/* Indexes into the per-option insmod parameter lists. */
#define AIC79XX_PRECOMP_INDEX	0
#define AIC79XX_SLEWRATE_INDEX	1
#define AIC79XX_AMPLITUDE_INDEX	2

/* One entry per controller, all starting at the chip defaults. */
static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
{
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS,
	AIC79XX_DEFAULT_IOOPTS
};
/*
* There should be a specific return value for this in scsi.h, but
* it seems that most drivers ignore it.
*/
#define DID_UNDERFLOW DID_ERROR
/*
 * Print the "(scsiH:C:T:L): " message prefix for a controller and,
 * optionally, an SCB.  With scb == NULL the channel/target/lun are
 * printed as 'X'/-1/-1.
 */
void
ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
{
	char channel = 'X';
	int target = -1;
	int lun = -1;

	if (scb != NULL) {
		channel = SCB_GET_CHANNEL(ahd, scb);
		target = SCB_GET_TARGET(ahd, scb);
		lun = SCB_GET_LUN(scb);
	}

	printk("(scsi%d:%c:%d:%d): ",
	       ahd->platform_data->host->host_no, channel, target, lun);
}
/*
* XXX - these options apply unilaterally to _all_ adapters
* cards in the system. This should be fixed. Exceptions to this
* rule are noted in the comments.
*/
/*
* Skip the scsi bus reset. Non 0 make us skip the reset at startup. This
* has no effect on any later resets that might occur due to things like
* SCSI bus timeouts.
*/
static uint32_t aic79xx_no_reset;
/*
* Should we force EXTENDED translation on a controller.
* 0 == Use whatever is in the SEEPROM or default to off
* 1 == Use whatever is in the SEEPROM or default to on
*/
static uint32_t aic79xx_extended;
/*
* PCI bus parity checking of the Adaptec controllers. This is somewhat
* dubious at best. To my knowledge, this option has never actually
* solved a PCI parity problem, but on certain machines with broken PCI
* chipset configurations, it can generate tons of false error messages.
* It's included in the driver for completeness.
* 0 = Shut off PCI parity check
* non-0 = Enable PCI parity check
*
* NOTE: you can't actually pass -1 on the lilo prompt. So, to set this
* variable to -1 you would actually want to simply pass the variable
* name without a number. That will invert the 0 which will result in
* -1.
*/
static uint32_t aic79xx_pci_parity = ~0;
/*
* There are lots of broken chipsets in the world. Some of them will
* violate the PCI spec when we issue byte sized memory writes to our
* controller. I/O mapped register access, if allowed by the given
* platform, will work in almost all cases.
*/
uint32_t aic79xx_allow_memio = ~0;
/*
* So that we can set how long each device is given as a selection timeout.
* The table of values goes like this:
* 0 - 256ms
* 1 - 128ms
* 2 - 64ms
* 3 - 32ms
* We default to 256ms because some older devices need a longer time
* to respond to initial selection.
*/
static uint32_t aic79xx_seltime;
/*
* Certain devices do not perform any aging on commands. Should the
* device be saturated by commands in one portion of the disk, it is
* possible for transactions on far away sectors to never be serviced.
* To handle these devices, we can periodically send an ordered tag to
* force all outstanding transactions to be serviced prior to a new
* transaction.
*/
static uint32_t aic79xx_periodic_otag;
/* Some storage boxes are using an LSI chip which has a bug making it
* impossible to use aic79xx Rev B chip in 320 speeds. The following
* storage boxes have been reported to be buggy:
* EonStor 3U 16-Bay: U16U-G3A3
* EonStor 2U 12-Bay: U12U-G3A3
* SentinelRAID: 2500F R5 / R6
* SentinelRAID: 2500F R1
* SentinelRAID: 2500F/1500F
* SentinelRAID: 150F
*
* To get around this LSI bug, you can set your board to 160 mode
* or you can enable the SLOWCRC bit.
*/
uint32_t aic79xx_slowcrc;
/*
* Module information and settable options.
*/
static char *aic79xx = NULL;
MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>");
MODULE_DESCRIPTION("Adaptec AIC790X U320 SCSI Host Bus Adapter driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(AIC79XX_DRIVER_VERSION);
module_param(aic79xx, charp, 0444);
MODULE_PARM_DESC(aic79xx,
"period-delimited options string:\n"
" verbose Enable verbose/diagnostic logging\n"
" allow_memio Allow device registers to be memory mapped\n"
" debug Bitmask of debug values to enable\n"
" no_reset Suppress initial bus resets\n"
" extended Enable extended geometry on all controllers\n"
" periodic_otag Send an ordered tagged transaction\n"
" periodically to prevent tag starvation.\n"
" This may be required by some older disk\n"
" or drives/RAID arrays.\n"
" tag_info:<tag_str> Set per-target tag depth\n"
" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
" precomp:<pcomp_list> Set the signal precompensation (0-7).\n"
" amplitude:<int> Set the signal amplitude (0-7).\n"
" seltime:<int> Selection Timeout:\n"
" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
" slowcrc Turn on the SLOWCRC bit (Rev B only)\n"
"\n"
" Sample modprobe configuration file:\n"
" # Enable verbose logging\n"
" # Set tag depth on Controller 2/Target 2 to 10 tags\n"
" # Shorten the selection timeout to 128ms\n"
"\n"
" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
);
static void ahd_linux_handle_scsi_status(struct ahd_softc *,
struct scsi_device *,
struct scb *);
static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
struct scsi_cmnd *cmd);
static int ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd);
static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
struct ahd_devinfo *devinfo);
static void ahd_linux_device_queue_depth(struct scsi_device *);
static int ahd_linux_run_command(struct ahd_softc*,
struct ahd_linux_device *,
struct scsi_cmnd *);
static void ahd_linux_setup_tag_info_global(char *p);
static int aic79xx_setup(char *c);
static void ahd_freeze_simq(struct ahd_softc *ahd);
static void ahd_release_simq(struct ahd_softc *ahd);
static int ahd_linux_unit;
/************************** OS Utility Wrappers *******************************/
void ahd_delay(long);
/*
 * Busy-wait for @usec microseconds in chunks of at most 1024us, since
 * udelay on Linux can have problems for multi-millisecond waits.
 *
 * Fix: the previous code passed (usec % 1024) to udelay(), which
 * under-delays whenever usec >= 1024 -- e.g. a request for 2048us
 * delayed 0us, and 2500us delayed only 3*452us.  Clamp each chunk to
 * 1024us instead so the total delay matches the request.
 */
void
ahd_delay(long usec)
{
	while (usec > 0) {
		udelay(usec > 1024 ? 1024 : usec);
		usec -= 1024;
	}
}
/***************************** Low Level I/O **********************************/
uint8_t ahd_inb(struct ahd_softc * ahd, long port);
void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
void ahd_outw_atomic(struct ahd_softc * ahd,
long port, uint16_t val);
void ahd_outsb(struct ahd_softc * ahd, long port,
uint8_t *, int count);
void ahd_insb(struct ahd_softc * ahd, long port,
uint8_t *, int count);
/*
 * Read one byte from controller register @port.  Uses MMIO when the
 * first bus-space tag is memory mapped; otherwise falls back to port
 * I/O, where the upper bits of @port select the ioport pair.
 */
uint8_t
ahd_inb(struct ahd_softc * ahd, long port)
{
	uint8_t x;

	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		x = readb(ahd->bshs[0].maddr + port);
	} else {
		x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
	}
	mb();		/* order this access against later register I/O */
	return (x);
}
#if 0 /* unused */
/*
 * Atomically read a 16-bit register; mirrors ahd_inb() using the
 * word-sized accessors.
 *
 * Fix: the local was declared uint8_t, silently truncating the high
 * byte of the value returned by readw()/inw().  Use uint16_t so the
 * full word survives if this helper is ever re-enabled.
 */
static uint16_t
ahd_inw_atomic(struct ahd_softc * ahd, long port)
{
	uint16_t x;

	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		x = readw(ahd->bshs[0].maddr + port);
	} else {
		x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
	}
	mb();
	return (x);
}
#endif
/*
 * Write one byte to controller register @port, via MMIO when the
 * first bus-space tag is memory mapped, otherwise via port I/O.
 */
void
ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
{
	if (ahd->tags[0] != BUS_SPACE_MEMIO)
		outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
	else
		writeb(val, ahd->bshs[0].maddr + port);
	mb();		/* order this write against later register I/O */
}
/*
 * Write a 16-bit value to controller register @port in a single bus
 * transaction (MMIO when available, otherwise port I/O).
 */
void
ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
{
	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		writew(val, ahd->bshs[0].maddr + port);
	} else {
		outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
	}
	mb();		/* order this write against later register I/O */
}
/*
 * Write @count bytes from @array to register @port, one bus access
 * per byte.  Nothing speed critical uses this path on Linux, so the
 * simple byte-at-a-time loop is good enough.
 */
void
ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
{
	uint8_t *end = array + count;

	while (array < end)
		ahd_outb(ahd, port, *array++);
}
/*
 * Read @count bytes from register @port into @array, one bus access
 * per byte.
 */
void
ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
{
	int i;

	/*
	 * There is probably a more efficient way to do this on Linux
	 * but we don't use this for anything speed critical and this
	 * should work.
	 */
	for (i = 0; i < count; i++)
		*array++ = ahd_inb(ahd, port);
}
/******************************* PCI Routines *********************************/
/*
 * Read a PCI config-space register of @width bytes (1, 2 or 4) and
 * return it zero-extended to 32 bits.  Any other width is a driver
 * bug and panics.
 */
uint32_t
ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
{
	switch (width) {
	case 1:
	{
		uint8_t retval;
		pci_read_config_byte(pci, reg, &retval);
		return (retval);
	}
	case 2:
	{
		uint16_t retval;
		pci_read_config_word(pci, reg, &retval);
		return (retval);
	}
	case 4:
	{
		uint32_t retval;
		pci_read_config_dword(pci, reg, &retval);
		return (retval);
	}
	default:
		panic("ahd_pci_read_config: Read size too big");
		/* NOTREACHED */
		return (0);
	}
}
/*
 * Write @value to a PCI config-space register of @width bytes
 * (1, 2 or 4); any other width is a driver bug and panics.
 */
void
ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
{
	switch (width) {
	case 1:
		pci_write_config_byte(pci, reg, value);
		break;
	case 2:
		pci_write_config_word(pci, reg, value);
		break;
	case 4:
		pci_write_config_dword(pci, reg, value);
		break;
	default:
		panic("ahd_pci_write_config: Write size too big");
		/* NOTREACHED */
	}
}
/****************************** Inlines ***************************************/
static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);

/*
 * Tear down the DMA state of a completed SCB: sync the S/G list back
 * for the CPU, then release the scatterlist mapping for the command.
 */
static void
ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsi_cmnd *cmd;

	cmd = scb->io_ctx;
	ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
	scsi_dma_unmap(cmd);
}
/******************************** Macros **************************************/
#define BUILD_SCSIID(ahd, cmd) \
(((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id)
/*
 * Return a string describing the driver.
 *
 * NOTE(review): the result is accumulated with strcpy/strcat into a
 * 512-byte static buffer; presumably ahd->description plus the
 * controller-info string (at most 256 bytes) always fit, but there is
 * no explicit bound check — confirm against the longest description.
 * The static buffer also makes this non-reentrant, which matches how
 * the midlayer calls .info.
 */
static const char *
ahd_linux_info(struct Scsi_Host *host)
{
	static char buffer[512];
	char ahd_info[256];
	char *bp;
	struct ahd_softc *ahd;

	bp = &buffer[0];
	ahd = *(struct ahd_softc **)host->hostdata;
	memset(bp, 0, sizeof(buffer));
	strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
		" <");
	strcat(bp, ahd->description);
	strcat(bp, ">\n"
		" ");
	ahd_controller_info(ahd, ahd_info);
	strcat(bp, ahd_info);
	return (bp);
}
/*
 * Queue an SCB to the controller.  Called by the midlayer (via the
 * DEF_SCSI_QCMD wrapper below) with the completion callback to invoke
 * when the command finishes.
 */
static int
ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
{
	struct ahd_softc *ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
	struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);

	cmd->scsi_done = scsi_done;
	cmd->result = CAM_REQ_INPROG << 16;

	return ahd_linux_run_command(ahd, dev, cmd);
}
static DEF_SCSI_QCMD(ahd_linux_queue)
/*
 * Map a midlayer scsi_target to its slot in the softc's starget
 * array.  Targets on the second channel occupy the upper half.
 */
static struct scsi_target **
ahd_linux_target_in_softc(struct scsi_target *starget)
{
	struct ahd_softc *ahd =
		*((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
	unsigned int target_offset =
		starget->id + (starget->channel != 0 ? 8 : 0);

	return &ahd->platform_data->starget[target_offset];
}
/*
 * Midlayer target_alloc() hook.  Records the new scsi_target in the
 * softc, seeds the SPI transport limits from the SEEPROM (when
 * present), and resets the negotiation goal to async/narrow so a full
 * renegotiation happens on the next command.
 */
static int
ahd_linux_target_alloc(struct scsi_target *starget)
{
	struct ahd_softc *ahd =
		*((struct ahd_softc **)dev_to_shost(&starget->dev)->hostdata);
	struct seeprom_config *sc = ahd->seep_config;
	unsigned long flags;
	struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
	struct ahd_devinfo devinfo;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	char channel = starget->channel + 'A';

	ahd_lock(ahd, &flags);

	/* The slot must be free; a duplicate alloc is a midlayer bug. */
	BUG_ON(*ahd_targp != NULL);

	*ahd_targp = starget;

	if (sc) {
		/* Shadows the outer "flags"; holds SEEPROM device flags. */
		int flags = sc->device_flags[starget->id];

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    starget->id, &tstate);

		if ((flags & CFPACKETIZED) == 0) {
			/* don't negotiate packetized (IU) transfers */
			spi_max_iu(starget) = 0;
		} else {
			if ((ahd->features & AHD_RTI) == 0)
				spi_rti(starget) = 0;
		}

		if ((flags & CFQAS) == 0)
			spi_max_qas(starget) = 0;

		/* Transinfo values have been set to BIOS settings */
		spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
		spi_min_period(starget) = tinfo->user.period;
		spi_max_offset(starget) = tinfo->user.offset;
	}

	tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
				    starget->id, &tstate);
	ahd_compile_devinfo(&devinfo, ahd->our_id, starget->id,
			    CAM_LUN_WILDCARD, channel,
			    ROLE_INITIATOR);
	/* Start from async, 8-bit; negotiation will raise these. */
	ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
			 AHD_TRANS_GOAL, /*paused*/FALSE);
	ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHD_TRANS_GOAL, /*paused*/FALSE);
	ahd_unlock(ahd, &flags);

	return 0;
}
static void
ahd_linux_target_destroy(struct scsi_target *starget)
{
struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
*ahd_targp = NULL;
}
/*
 * Midlayer slave_alloc() hook.  Initializes the per-device state that
 * lives in the SCSI transport's device data area.
 */
static int
ahd_linux_slave_alloc(struct scsi_device *sdev)
{
	struct ahd_softc *ahd =
		*((struct ahd_softc **)sdev->host->hostdata);
	struct ahd_linux_device *dev;

	if (bootverbose)
		printk("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);

	dev = scsi_transport_device_data(sdev);
	memset(dev, 0, sizeof(*dev));

	/*
	 * We start out life using untagged
	 * transactions of which we allow one.
	 */
	dev->openings = 1;

	/*
	 * Set maxtags to 0. This will be changed if we
	 * later determine that we are dealing with
	 * a tagged queuing capable device.
	 */
	dev->maxtags = 0;

	return (0);
}
/*
 * Midlayer slave_configure() hook.  Applies the user/SEEPROM tag
 * depth and kicks off SPI domain validation the first time a device
 * is configured.
 */
static int
ahd_linux_slave_configure(struct scsi_device *sdev)
{
	struct ahd_softc *ahd;

	ahd = *((struct ahd_softc **)sdev->host->hostdata);
	if (bootverbose)
		sdev_printk(KERN_INFO, sdev, "Slave Configure\n");

	ahd_linux_device_queue_depth(sdev);

	/* Initial Domain Validation */
	if (!spi_initial_dv(sdev->sdev_target))
		spi_dv_device(sdev);

	return 0;
}
#if defined(__i386__)
/*
 * Return the disk geometry for the given SCSI device.
 *
 * Prefers the geometry recorded in the on-disk partition table; if
 * none is usable, synthesizes 64 heads / 32 sectors, switching to
 * 255/63 ("extended translation") for large disks when enabled by
 * module option or SEEPROM flag.  geom[] is {heads, sectors,
 * cylinders}.
 */
static int
ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		    sector_t capacity, int geom[])
{
	uint8_t *bh;
	int heads;
	int sectors;
	int cylinders;
	int ret;
	int extended;
	struct ahd_softc *ahd;

	ahd = *((struct ahd_softc **)sdev->host->hostdata);

	bh = scsi_bios_ptable(bdev);
	if (bh) {
		ret = scsi_partsize(bh, capacity,
				    &geom[2], &geom[0], &geom[1]);
		kfree(bh);
		if (ret != -1)
			return (ret);
	}
	heads = 64;
	sectors = 32;
	cylinders = aic_sector_div(capacity, heads, sectors);

	if (aic79xx_extended != 0)
		extended = 1;
	else
		extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0;
	if (extended && cylinders >= 1024) {
		heads = 255;
		sectors = 63;
		cylinders = aic_sector_div(capacity, heads, sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return (0);
}
#endif
/*
 * Abort the current SCSI command(s).  Thin eh_abort_handler wrapper
 * around the recovery-thread abort path.
 */
static int
ahd_linux_abort(struct scsi_cmnd *cmd)
{
	return ahd_linux_queue_abort_cmd(cmd);
}
/*
 * Attempt to send a target reset message to the device that timed out.
 *
 * Builds a recovery SCB carrying a LUN-reset task-management request
 * (packetized) or a MK_MESSAGE reset (non-packetized), queues it, and
 * waits up to 5 seconds for the completion handler to signal eh_done.
 * Returns SUCCESS or FAILED per the SCSI error-handling contract.
 */
static int
ahd_linux_dev_reset(struct scsi_cmnd *cmd)
{
	struct ahd_softc *ahd;
	struct ahd_linux_device *dev;
	struct scb *reset_scb;
	u_int  cdb_byte;
	int    retval = SUCCESS;
	int    paused;
	int    wait;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(done);

	reset_scb = NULL;
	paused = FALSE;
	wait = FALSE;
	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;

	scmd_printk(KERN_INFO, cmd,
		    "Attempting to queue a TARGET RESET message:");

	printk("CDB:");
	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
		printk(" 0x%x", cmd->cmnd[cdb_byte]);
	printk("\n");

	/*
	 * Determine if we currently own this command.
	 */
	dev = scsi_transport_device_data(cmd->device);

	if (dev == NULL) {
		/*
		 * No target device for this command exists,
		 * so we must not still own the command.
		 */
		scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
		return SUCCESS;
	}

	/*
	 * Generate us a new SCB
	 */
	reset_scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX);
	if (!reset_scb) {
		scmd_printk(KERN_INFO, cmd, "No SCB available\n");
		return FAILED;
	}

	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
				    cmd->device->id, &tstate);
	reset_scb->io_ctx = cmd;
	reset_scb->platform_data->dev = dev;
	reset_scb->sg_count = 0;
	ahd_set_residual(reset_scb, 0);
	ahd_set_sense_residual(reset_scb, 0);
	reset_scb->platform_data->xfer_len = 0;
	reset_scb->hscb->control = 0;
	reset_scb->hscb->scsiid = BUILD_SCSIID(ahd,cmd);
	reset_scb->hscb->lun = cmd->device->lun;
	reset_scb->hscb->cdb_len = 0;
	reset_scb->hscb->task_management = SIU_TASKMGMT_LUN_RESET;
	reset_scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		/* Packetized target: reset travels as a TMF IU. */
		reset_scb->flags |= SCB_PACKETIZED;
	} else {
		/* Legacy target: deliver the reset via a message. */
		reset_scb->hscb->control |= MK_MESSAGE;
	}
	dev->openings--;
	dev->active++;
	dev->commands_issued++;

	ahd_lock(ahd, &flags);

	LIST_INSERT_HEAD(&ahd->pending_scbs, reset_scb, pending_links);
	ahd_queue_scb(ahd, reset_scb);

	ahd->platform_data->eh_done = &done;
	ahd_unlock(ahd, &flags);

	printk("%s: Device reset code sleeping\n", ahd_name(ahd));
	if (!wait_for_completion_timeout(&done, 5 * HZ)) {
		/* Timed out: detach the completion before returning. */
		ahd_lock(ahd, &flags);
		ahd->platform_data->eh_done = NULL;
		ahd_unlock(ahd, &flags);
		printk("%s: Device reset timer expired (active %d)\n",
		       ahd_name(ahd), dev->active);
		retval = FAILED;
	}
	printk("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);

	return (retval);
}
/*
 * Reset the SCSI bus.
 *
 * eh_bus_reset_handler: issues a hard reset on the command's channel
 * under the softc lock and reports how many SCBs were aborted.
 * Always returns SUCCESS since the reset itself cannot fail.
 */
static int
ahd_linux_bus_reset(struct scsi_cmnd *cmd)
{
	struct ahd_softc *ahd;
	int    found;
	unsigned long flags;

	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
		printk("%s: Bus reset called for cmd %p\n",
		       ahd_name(ahd), cmd);
#endif
	ahd_lock(ahd, &flags);

	found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A',
				  /*initiate reset*/TRUE);
	ahd_unlock(ahd, &flags);

	if (bootverbose)
		printk("%s: SCSI bus reset delivered. "
		       "%d SCBs aborted.\n", ahd_name(ahd), found);

	return (SUCCESS);
}
/*
 * SCSI midlayer host template for aic79xx controllers.  The entry
 * points registered here are invoked by the SCSI core for command
 * submission, error handling, and device lifecycle events.
 */
struct scsi_host_template aic79xx_driver_template = {
	.module			= THIS_MODULE,
	.name			= "aic79xx",
	.proc_name		= "aic79xx",
	.proc_info		= ahd_linux_proc_info,
	.info			= ahd_linux_info,
	.queuecommand		= ahd_linux_queue,
	.eh_abort_handler	= ahd_linux_abort,
	.eh_device_reset_handler = ahd_linux_dev_reset,
	.eh_bus_reset_handler	= ahd_linux_bus_reset,
#if defined(__i386__)
	.bios_param		= ahd_linux_biosparam,
#endif
	.can_queue		= AHD_MAX_QUEUE,
	.this_id		= -1,
	.max_sectors		= 8192,
	.cmd_per_lun		= 2,	/* untagged default; raised when TCQ is enabled */
	.use_clustering		= ENABLE_CLUSTERING,
	.slave_alloc		= ahd_linux_slave_alloc,
	.slave_configure	= ahd_linux_slave_configure,
	.target_alloc		= ahd_linux_target_alloc,
	.target_destroy		= ahd_linux_target_destroy,
};
/******************************** Bus DMA *************************************/
/*
 * Create a BSD-style DMA tag (a thin shim over Linux DMA).  Only the
 * fields this driver actually consults are recorded.  Returns 0 on
 * success or ENOMEM (positive, BSD convention) on allocation failure.
 */
int
ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
		   bus_size_t alignment, bus_size_t boundary,
		   dma_addr_t lowaddr, dma_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
{
	bus_dma_tag_t dmat;

	dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
	if (dmat == NULL)
		return (ENOMEM);

	/*
	 * Linux is very simplistic about DMA memory.  For now don't
	 * maintain all specification information.  Once Linux supplies
	 * better facilities for doing these operations, or the
	 * needs of this particular driver change, we might need to do
	 * more here.
	 */
	dmat->alignment = alignment;
	dmat->boundary = boundary;
	dmat->maxsize = maxsize;
	*ret_tag = dmat;
	return (0);
}
/* Release a DMA tag created by ahd_dma_tag_create(). */
void
ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
{
	kfree(dmat);
}
/*
 * Allocate coherent DMA memory of the tag's maxsize.  The bus address
 * is returned through *mapp and the kernel virtual address through
 * *vaddr.  Returns 0 on success or ENOMEM (positive, BSD convention).
 */
int
ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
		 int flags, bus_dmamap_t *mapp)
{
	*vaddr = pci_alloc_consistent(ahd->dev_softc,
				      dmat->maxsize, mapp);
	if (*vaddr == NULL)
		return (ENOMEM);
	return(0);
}
/* Free coherent DMA memory obtained from ahd_dmamem_alloc(). */
void
ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
	pci_free_consistent(ahd->dev_softc, dmat->maxsize,
			    vaddr, map);
}
/*
 * "Load" a DMA map: coherent allocations are already mapped, so this
 * simply reports a single segment covering the whole region via the
 * BSD-style callback.
 */
int
ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
		void *cb_arg, int flags)
{
	/*
	 * Assume for now that this will only be used during
	 * initialization and not for per-transaction buffer mapping.
	 */
	bus_dma_segment_t stack_sg;

	stack_sg.ds_addr = map;
	stack_sg.ds_len = dmat->maxsize;
	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
	return (0);
}
/* No per-map state is kept on Linux; nothing to destroy. */
void
ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
{
}
/* Counterpart to ahd_dmamap_load(); a no-op on Linux. */
int
ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do */
	return (0);
}
/********************* Platform Dependent Functions ***************************/
/*
 * Module-option callback: store an IO-cell tuning value (slew rate,
 * precomp, or amplitude — selected by @index) for controller
 * @instance.
 *
 * NOTE(review): @index is not bounds-checked against the iocell-info
 * struct, and the 0xFFFF mask is wider than the uint8_t slot it is
 * stored into (only the low byte survives) — presumably the option
 * values are always small; confirm against aic79xx_iocell_info's
 * layout.
 */
static void
ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
{

	if ((instance >= 0)
	 && (instance < ARRAY_SIZE(aic79xx_iocell_info))) {
		uint8_t *iocell_info;

		iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
		iocell_info[index] = value & 0xFFFF;
		if (bootverbose)
			printk("iocell[%d:%ld] = %d\n", instance, index, value);
	}
}
/*
 * Module-option handler for "global_tag_depth": apply one tag depth
 * to every target of every controller instance.
 */
static void
ahd_linux_setup_tag_info_global(char *p)
{
	int depth = simple_strtoul(p + 1, NULL, 0) & 0xff;
	int inst, targ;

	printk("Setting Global Tags= %d\n", depth);

	for (inst = 0; inst < ARRAY_SIZE(aic79xx_tag_info); inst++)
		for (targ = 0; targ < AHD_NUM_TARGETS; targ++)
			aic79xx_tag_info[inst].tag_commands[targ] = depth;
}
/*
 * Brace-option callback for "tag_info": record the tag depth for one
 * (controller instance, target) pair, ignoring out-of-range indices.
 */
static void
ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
{
	if (instance < 0 || targ < 0
	 || instance >= ARRAY_SIZE(aic79xx_tag_info)
	 || targ >= AHD_NUM_TARGETS)
		return;

	aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
	if (bootverbose)
		printk("tag_info[%d:%d] = %d\n", instance, targ, value);
}
/*
 * Parse a brace-grouped option argument of the form
 * "opt:{{v.v...}.{v...}}", invoking @callback for every numeric value
 * with the current (instance, target) position.  @depth selects how
 * many brace levels are legal (1 = per-instance, 2 = per-target).
 * Parsing is destructive: strsep() in the caller NUL-terminated the
 * argument, so the separator is restored before scanning.  Returns a
 * pointer just past the consumed text.
 */
static char *
ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
		       void (*callback)(u_long, int, int, int32_t),
		       u_long callback_arg)
{
	char	*tok_end;
	char	*tok_end2;
	int      i;
	int      instance;
	int	 targ;
	int	 done;
	char	 tok_list[] = {'.', ',', '{', '}', '\0'};

	/* All options use a ':' name/arg separator */
	if (*opt_arg != ':')
		return (opt_arg);
	opt_arg++;
	instance = -1;
	targ = -1;
	done = FALSE;
	/*
	 * Restore separator that may be in
	 * the middle of our option argument.
	 */
	tok_end = strchr(opt_arg, '\0');
	if (tok_end < end)
		*tok_end = ',';
	while (!done) {
		switch (*opt_arg) {
		case '{':
			/* Open a grouping level: instance, then target. */
			if (instance == -1) {
				instance = 0;
			} else {
				if (depth > 1) {
					if (targ == -1)
						targ = 0;
				} else {
					printk("Malformed Option %s\n",
					       opt_name);
					done = TRUE;
				}
			}
			opt_arg++;
			break;
		case '}':
			/* Close the innermost open grouping level. */
			if (targ != -1)
				targ = -1;
			else if (instance != -1)
				instance = -1;
			opt_arg++;
			break;
		case ',':
		case '.':
			/* Advance to the next target/instance slot. */
			if (instance == -1)
				done = TRUE;
			else if (targ >= 0)
				targ++;
			else if (instance >= 0)
				instance++;
			opt_arg++;
			break;
		case '\0':
			done = TRUE;
			break;
		default:
			/* A value: find its end, convert, and report it. */
			tok_end = end;
			for (i = 0; tok_list[i]; i++) {
				tok_end2 = strchr(opt_arg, tok_list[i]);
				if ((tok_end2) && (tok_end2 < tok_end))
					tok_end = tok_end2;
			}
			callback(callback_arg, instance, targ,
				 simple_strtol(opt_arg, NULL, 0));
			opt_arg = tok_end;
			break;
		}
	}
	return (opt_arg);
}
/*
 * Handle Linux boot parameters. This routine allows for assigning a value
 * to a parameter with a ':' between the parameter and the value.
 * ie. aic79xx=stpwlev:1,extended
 *
 * Options are matched by prefix (strncmp against the table entry's
 * full length); brace-valued options hand the remainder of the string
 * to ahd_parse_brace_option().  Flag options with no ':' are toggled,
 * except "verbose" which is simply set.
 */
static int
aic79xx_setup(char *s)
{
	int	i, n;
	char   *p;
	char   *end;

	static const struct {
		const char *name;
		uint32_t *flag;
	} options[] = {
		{ "extended", &aic79xx_extended },
		{ "no_reset", &aic79xx_no_reset },
		{ "verbose", &aic79xx_verbose },
		{ "allow_memio", &aic79xx_allow_memio},
#ifdef AHD_DEBUG
		{ "debug", &ahd_debug },
#endif
		{ "periodic_otag", &aic79xx_periodic_otag },
		{ "pci_parity", &aic79xx_pci_parity },
		{ "seltime", &aic79xx_seltime },
		{ "tag_info", NULL },
		{ "global_tag_depth", NULL},
		{ "slewrate", NULL },
		{ "precomp", NULL },
		{ "amplitude", NULL },
		{ "slowcrc", &aic79xx_slowcrc },
	};

	end = strchr(s, '\0');

	/*
	 * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE
	 * will never be 0 in this case.
	 */
	n = 0;

	while ((p = strsep(&s, ",.")) != NULL) {
		if (*p == '\0')
			continue;
		for (i = 0; i < ARRAY_SIZE(options); i++) {

			n = strlen(options[i].name);
			if (strncmp(options[i].name, p, n) == 0)
				break;
		}
		if (i == ARRAY_SIZE(options))
			continue;

		if (strncmp(p, "global_tag_depth", n) == 0) {
			ahd_linux_setup_tag_info_global(p + n);
		} else if (strncmp(p, "tag_info", n) == 0) {
			s = ahd_parse_brace_option("tag_info", p + n, end,
			    2, ahd_linux_setup_tag_info, 0);
		} else if (strncmp(p, "slewrate", n) == 0) {
			s = ahd_parse_brace_option("slewrate",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_SLEWRATE_INDEX);
		} else if (strncmp(p, "precomp", n) == 0) {
			s = ahd_parse_brace_option("precomp",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_PRECOMP_INDEX);
		} else if (strncmp(p, "amplitude", n) == 0) {
			s = ahd_parse_brace_option("amplitude",
			    p + n, end, 1, ahd_linux_setup_iocell_info,
			    AIC79XX_AMPLITUDE_INDEX);
		} else if (p[n] == ':') {
			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
		} else if (!strncmp(p, "verbose", n)) {
			*(options[i].flag) = 1;
		} else {
			*(options[i].flag) ^= 0xFFFFFFFF;
		}
	}
	return 1;
}
__setup("aic79xx=", aic79xx_setup);
uint32_t aic79xx_verbose;
/*
 * Allocate a Scsi_Host for @ahd, populate its limits from the softc,
 * bring the bus to a known state, and register/scan the host with the
 * SCSI midlayer.  Returns 0 on success, ENOMEM (positive, BSD
 * convention) when host allocation fails, or the negative error from
 * scsi_add_host().
 */
int
ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *template)
{
	char buf[80];
	struct Scsi_Host *host;
	char *new_name;
	u_long s;
	int retval;

	template->name = ahd->description;
	host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
	if (host == NULL)
		return (ENOMEM);

	/* hostdata holds a single back-pointer to the softc. */
	*((struct ahd_softc **)host->hostdata) = ahd;
	ahd->platform_data->host = host;
	host->can_queue = AHD_MAX_QUEUE;
	host->cmd_per_lun = 2;
	host->sg_tablesize = AHD_NSEG;
	host->this_id = ahd->our_id;
	host->irq = ahd->platform_data->irq;
	host->max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
	host->max_lun = AHD_NUM_LUNS;
	host->max_channel = 0;
	host->sg_tablesize = AHD_NSEG;
	ahd_lock(ahd, &s);
	ahd_set_unit(ahd, ahd_linux_unit++);
	ahd_unlock(ahd, &s);
	sprintf(buf, "scsi%d", host->host_no);
	new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
	if (new_name != NULL) {
		strcpy(new_name, buf);
		ahd_set_name(ahd, new_name);
	}
	host->unique_id = ahd->unit;
	ahd_linux_initialize_scsi_bus(ahd);
	ahd_intr_enable(ahd, TRUE);

	host->transportt = ahd_linux_transport_template;

	retval = scsi_add_host(host, &ahd->dev_softc->dev);
	if (retval) {
		printk(KERN_WARNING "aic79xx: scsi_add_host failed\n");
		scsi_host_put(host);
		return retval;
	}

	scsi_scan_host(host);
	return 0;
}
/*
 * Place the SCSI bus into a known state by either resetting it,
 * or forcing transfer negotiations on the next command to any
 * target.
 */
static void
ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
{
	u_int target_id;
	u_int numtarg;
	unsigned long s;

	target_id = 0;
	numtarg = 0;

	if (aic79xx_no_reset != 0)
		ahd->flags &= ~AHD_RESET_BUS_A;

	if ((ahd->flags & AHD_RESET_BUS_A) != 0)
		ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE);
	else
		/* No reset: every target needs forced renegotiation. */
		numtarg = (ahd->features & AHD_WIDE) ? 16 : 8;

	ahd_lock(ahd, &s);

	/*
	 * Force negotiation to async for all targets that
	 * will not see an initial bus reset.
	 */
	for (; target_id < numtarg; target_id++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    target_id, &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
				    CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
		ahd_update_neg_request(ahd, &devinfo, tstate,
				       tinfo, AHD_NEG_ALWAYS);
	}
	ahd_unlock(ahd, &s);
	/* Give the bus some time to recover */
	if ((ahd->flags & AHD_RESET_BUS_A) != 0) {
		ahd_freeze_simq(ahd);
		msleep(AIC79XX_RESET_DELAY);
		ahd_release_simq(ahd);
	}
}
/*
 * Allocate and initialize the Linux-specific per-controller state.
 * Returns 0 on success or ENOMEM (positive, BSD convention) when the
 * allocation fails.
 *
 * Uses kzalloc() instead of the original kmalloc()+memset() pair —
 * identical semantics, less code.
 */
int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data =
	    kzalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
	if (ahd->platform_data == NULL)
		return (ENOMEM);
	ahd->platform_data->irq = AHD_LINUX_NOIRQ;
	ahd_lockinit(ahd);
	/* Selection timeout: two user-option bits, hardware encoding. */
	ahd->seltime = (aic79xx_seltime & 0x3) << 4;
	return (0);
}
/*
 * Release all Linux-specific per-controller resources: target
 * back-pointers, the IRQ, I/O port or MMIO regions, and finally the
 * Scsi_Host reference and the platform_data block itself.
 */
void
ahd_platform_free(struct ahd_softc *ahd)
{
	struct scsi_target *starget;
	int i;

	if (ahd->platform_data != NULL) {
		/* destroy all of the device and target objects */
		for (i = 0; i < AHD_NUM_TARGETS; i++) {
			starget = ahd->platform_data->starget[i];
			if (starget != NULL) {
				ahd->platform_data->starget[i] = NULL;
			}
		}

		if (ahd->platform_data->irq != AHD_LINUX_NOIRQ)
			free_irq(ahd->platform_data->irq, ahd);
		if (ahd->tags[0] == BUS_SPACE_PIO
		 && ahd->bshs[0].ioport != 0)
			release_region(ahd->bshs[0].ioport, 256);
		if (ahd->tags[1] == BUS_SPACE_PIO
		 && ahd->bshs[1].ioport != 0)
			release_region(ahd->bshs[1].ioport, 256);
		if (ahd->tags[0] == BUS_SPACE_MEMIO
		 && ahd->bshs[0].maddr != NULL) {
			iounmap(ahd->bshs[0].maddr);
			release_mem_region(ahd->platform_data->mem_busaddr,
					   0x1000);
		}
		if (ahd->platform_data->host)
			scsi_host_put(ahd->platform_data->host);

		kfree(ahd->platform_data);
	}
}
/*
 * Apply any IO-cell tuning overrides (precomp, slew rate, amplitude)
 * the user supplied via module options for this controller unit.
 */
void
ahd_platform_init(struct ahd_softc *ahd)
{
	/*
	 * Lookup and commit any modified IO Cell options.
	 */
	if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
		const struct ahd_linux_iocell_opts *iocell_opts;

		iocell_opts = &aic79xx_iocell_info[ahd->unit];
		if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
			AHD_SET_PRECOMP(ahd, iocell_opts->precomp);
		if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE)
			AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate);
		if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE)
			AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude);
	}
}
/*
 * Freeze the device queue associated with @scb by requeueing all of
 * its outstanding transactions.
 */
void
ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
				SCB_GET_CHANNEL(ahd, scb),
				SCB_GET_LUN(scb), SCB_LIST_NULL,
				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
}
/*
 * Switch a device between untagged, basic-tagged, and ordered-tagged
 * queueing, updating the driver's opening accounting and informing
 * the midlayer of the new queue type/depth.
 */
void
ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
		      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
	struct ahd_linux_device *dev;
	int was_queuing;
	int now_queuing;

	if (sdev == NULL)
		return;

	dev = scsi_transport_device_data(sdev);

	if (dev == NULL)
		return;
	was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
	switch (alg) {
	default:
	case AHD_QUEUE_NONE:
		now_queuing = 0;
		break;
	case AHD_QUEUE_BASIC:
		now_queuing = AHD_DEV_Q_BASIC;
		break;
	case AHD_QUEUE_TAGGED:
		now_queuing = AHD_DEV_Q_TAGGED;
		break;
	}
	/*
	 * Freeze new submissions until the in-flight commands drain
	 * when the queueing mode changes underneath active commands.
	 */
	if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0
	 && (was_queuing != now_queuing)
	 && (dev->active != 0)) {
		dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen++;
	}

	dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG);
	if (now_queuing) {
		u_int usertags;

		usertags = ahd_linux_user_tagdepth(ahd, devinfo);
		if (!was_queuing) {
			/*
			 * Start out aggressively and allow our
			 * dynamic queue depth algorithm to take
			 * care of the rest.
			 */
			dev->maxtags = usertags;
			dev->openings = dev->maxtags - dev->active;
		}
		if (dev->maxtags == 0) {
			/*
			 * Queueing is disabled by the user.
			 */
			dev->openings = 1;
		} else if (alg == AHD_QUEUE_TAGGED) {
			dev->flags |= AHD_DEV_Q_TAGGED;
			if (aic79xx_periodic_otag != 0)
				dev->flags |= AHD_DEV_PERIODIC_OTAG;
		} else
			dev->flags |= AHD_DEV_Q_BASIC;
	} else {
		/* We can only have one opening. */
		dev->maxtags = 0;
		dev->openings =  1 - dev->active;
	}

	switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
	case AHD_DEV_Q_BASIC:
		scsi_set_tag_type(sdev, MSG_SIMPLE_TASK);
		scsi_activate_tcq(sdev, dev->openings + dev->active);
		break;
	case AHD_DEV_Q_TAGGED:
		scsi_set_tag_type(sdev, MSG_ORDERED_TASK);
		scsi_activate_tcq(sdev, dev->openings + dev->active);
		break;
	default:
		/*
		 * We allow the OS to queue 2 untagged transactions to
		 * us at any time even though we can only execute them
		 * serially on the controller/device.  This should
		 * remove some latency.
		 */
		scsi_deactivate_tcq(sdev, 1);
		break;
	}
}
/*
 * Platform hook for aborting matching SCBs; the Linux OSM keeps no
 * separate per-platform queues, so there is nothing to abort here.
 * Always reports zero SCBs affected.
 */
int
ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
			int lun, u_int tag, role_t role, uint32_t status)
{
	return 0;
}
/*
 * Return the user-configured tag depth for the device described by
 * @devinfo, clamped to AHD_MAX_QUEUE.  Returns 0 (untagged) when
 * disconnection is disabled for the target.  Warns once if there are
 * more controllers than tag_info table entries.
 */
static u_int
ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	static int warned_user;
	u_int tags;

	tags = 0;
	if ((ahd->user_discenable & devinfo->target_mask) != 0) {
		if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) {

			if (warned_user == 0) {
				printk(KERN_WARNING
"aic79xx: WARNING: Insufficient tag_info instances\n"
"aic79xx: for installed controllers.  Using defaults\n"
"aic79xx: Please update the aic79xx_tag_info array in\n"
"aic79xx: the aic79xx_osm.c source file.\n");
				warned_user++;
			}
			tags = AHD_MAX_QUEUE;
		} else {
			adapter_tag_info_t *tag_info;

			tag_info = &aic79xx_tag_info[ahd->unit];
			tags = tag_info->tag_commands[devinfo->target_offset];
			if (tags > AHD_MAX_QUEUE)
				tags = AHD_MAX_QUEUE;
		}
	}
	return (tags);
}
/*
 * Determines the queue depth for a given device.
 *
 * Enables tagged queueing at the user-configured depth when both the
 * user settings and the device support it; otherwise forces untagged
 * operation.  Either way the transport layer is notified of the
 * (possibly changed) transfer negotiation state.
 */
static void
ahd_linux_device_queue_depth(struct scsi_device *sdev)
{
	struct	ahd_devinfo devinfo;
	u_int	tags;
	struct ahd_softc *ahd = *((struct ahd_softc **)sdev->host->hostdata);

	ahd_compile_devinfo(&devinfo,
			    ahd->our_id,
			    sdev->sdev_target->id, sdev->lun,
			    sdev->sdev_target->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);
	tags = ahd_linux_user_tagdepth(ahd, &devinfo);
	if (tags != 0 && sdev->tagged_supported != 0) {

		ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_TAGGED);
		ahd_send_async(ahd, devinfo.channel, devinfo.target,
			       devinfo.lun, AC_TRANSFER_NEG);
		ahd_print_devinfo(ahd, &devinfo);
		printk("Tagged Queuing enabled.  Depth %d\n", tags);
	} else {
		ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE);
		ahd_send_async(ahd, devinfo.channel, devinfo.target,
			       devinfo.lun, AC_TRANSFER_NEG);
	}
}
/*
 * Map, build, and queue a single SCSI command to the controller.
 * Maps the command for DMA, obtains an SCB (collision-indexed for
 * tagged non-packetized devices), fills in the hardware SCB, attaches
 * the S/G list, and hands the SCB to the sequencer.  Returns 0 on
 * success or SCSI_MLQUEUE_HOST_BUSY when resources are exhausted.
 */
static int
ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
		      struct scsi_cmnd *cmd)
{
	struct	 scb *scb;
	struct	 hardware_scb *hscb;
	struct	 ahd_initiator_tinfo *tinfo;
	struct	 ahd_tmode_tstate *tstate;
	u_int	 col_idx;
	uint16_t mask;
	unsigned long flags;
	int nseg;

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	ahd_lock(ahd, &flags);

	/*
	 * Get an scb to use.
	 */
	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
				    cmd->device->id, &tstate);
	if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
	 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		col_idx = AHD_NEVER_COL_IDX;
	} else {
		col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
					    cmd->device->lun);
	}
	if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
		/* Out of SCBs: ask the midlayer to retry later. */
		ahd->flags |= AHD_RESOURCE_SHORTAGE;
		ahd_unlock(ahd, &flags);
		scsi_dma_unmap(cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	scb->io_ctx = cmd;
	scb->platform_data->dev = dev;
	hscb = scb->hscb;
	cmd->host_scribble = (char *)scb;

	/*
	 * Fill out basics of the HSCB.
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahd, cmd);
	hscb->lun = cmd->device->lun;
	scb->hscb->task_management = 0;
	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((ahd->user_discenable & mask) != 0)
		hscb->control |= DISCENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
		scb->flags |= SCB_PACKETIZED;

	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
		int	msg_bytes;
		uint8_t tag_msgs[2];

		/*
		 * Use the midlayer's tag choice when it provides one;
		 * otherwise inject a periodic ordered tag to prevent
		 * starvation, defaulting to simple tags.
		 */
		msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
		if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
			hscb->control |= tag_msgs[0];
			if (tag_msgs[0] == MSG_ORDERED_TASK)
				dev->commands_since_idle_or_otag = 0;
		} else
		if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
		 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
			hscb->control |= MSG_ORDERED_TASK;
			dev->commands_since_idle_or_otag = 0;
		} else {
			hscb->control |= MSG_SIMPLE_TASK;
		}
	}

	hscb->cdb_len = cmd->cmd_len;
	memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);

	scb->platform_data->xfer_len = 0;

	ahd_set_residual(scb, 0);
	ahd_set_sense_residual(scb, 0);
	scb->sg_count = 0;

	if (nseg > 0) {
		void *sg = scb->sg_list;
		struct scatterlist *cur_seg;
		int i;

		/*
		 * Copy the segments into our SG list, tracking the
		 * total transfer length and marking the final entry.
		 */
		scb->platform_data->xfer_len = 0;

		scsi_for_each_sg(cmd, cur_seg, nseg, i) {
			dma_addr_t addr;
			bus_size_t len;

			addr = sg_dma_address(cur_seg);
			len = sg_dma_len(cur_seg);
			scb->platform_data->xfer_len += len;

			sg = ahd_sg_setup(ahd, scb, sg, addr, len,
					  i == (nseg - 1));
		}
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
	dev->openings--;
	dev->active++;
	dev->commands_issued++;

	if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
		dev->commands_since_idle_or_otag++;
	scb->flags |= SCB_ACTIVE;
	ahd_queue_scb(ahd, scb);

	ahd_unlock(ahd, &flags);

	return 0;
}
/*
 * SCSI controller interrupt handler.  Takes the softc lock and lets
 * the core interrupt routine decide whether the interrupt was ours.
 */
irqreturn_t
ahd_linux_isr(int irq, void *dev_id)
{
	struct ahd_softc *ahd = dev_id;
	int handled;
	u_long flags;

	ahd_lock(ahd, &flags);
	handled = ahd_intr(ahd);
	ahd_unlock(ahd, &flags);

	return IRQ_RETVAL(handled);
}
/*
 * Broadcast an asynchronous event to the Linux midlayer: publish
 * negotiated transfer parameters to the SPI transport class
 * (AC_TRANSFER_NEG), or report device/bus resets.  Unknown event
 * codes indicate a driver bug and panic.
 */
void
ahd_send_async(struct ahd_softc *ahd, char channel,
	       u_int target, u_int lun, ac_code code)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char	buf[80];
		struct	scsi_target *starget;
		struct	info_str info;
		struct	ahd_initiator_tinfo *tinfo;
		struct	ahd_tmode_tstate *tstate;
		unsigned int target_ppr_options;

		BUG_ON(target == CAM_TARGET_WILDCARD);

		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		starget = ahd->platform_data->starget[target];
		if (starget == NULL)
			break;

		/* Reassemble the transport's view of the PPR options. */
		target_ppr_options =
			(spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
			+ (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
			+ (spi_iu(starget) ?  MSG_EXT_PPR_IU_REQ : 0)
			+ (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0)
			+ (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0)
			+ (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0)
			+ (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0)
			+ (spi_hold_mcs(starget) ? MSG_EXT_PPR_HOLD_MCS : 0);

		if (tinfo->curr.period == spi_period(starget)
		 && tinfo->curr.width == spi_width(starget)
		 && tinfo->curr.offset == spi_offset(starget)
		 && tinfo->curr.ppr_options == target_ppr_options)
			if (bootverbose == 0)
				break;

		/* Push the newly negotiated parameters to the transport. */
		spi_period(starget) = tinfo->curr.period;
		spi_width(starget) = tinfo->curr.width;
		spi_offset(starget) = tinfo->curr.offset;
		spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
		spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
		spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
		spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0;
		spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0;
		spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0;
		spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0;
		spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0;
		spi_display_xfer_agreement(starget);
		break;
	}
	case AC_SENT_BDR:
	{
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahd->platform_data->host,
					 channel - 'A', target);
		break;
	}
	case AC_BUS_RESET:
		if (ahd->platform_data->host != NULL) {
			scsi_report_bus_reset(ahd->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
}
/*
 * Calls the higher level scsi done function and frees the scb.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
struct scsi_cmnd *cmd;
struct ahd_linux_device *dev;
/*
 * Completing an SCB that is no longer marked active indicates
 * corrupted driver state; dump diagnostics and halt rather than
 * risk further damage.
 */
if ((scb->flags & SCB_ACTIVE) == 0) {
printk("SCB %d done'd twice\n", SCB_GET_TAG(scb));
ahd_dump_card_state(ahd);
panic("Stopping for safety");
}
LIST_REMOVE(scb, pending_links);
cmd = scb->io_ctx;
dev = scb->platform_data->dev;
/* One less command outstanding; one more opening available. */
dev->active--;
dev->openings++;
/* Release a device-queue freeze reference if this command held one. */
if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
cmd->result &= ~(CAM_DEV_QFRZN << 16);
dev->qfrozen--;
}
ahd_linux_unmap_scb(ahd, scb);
/*
 * Guard against stale sense data.
 * The Linux mid-layer assumes that sense
 * was retrieved anytime the first byte of
 * the sense buffer looks "sane".
 */
cmd->sense_buffer[0] = 0;
if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
/* Status was never explicitly set: derive it here. */
uint32_t amount_xferred;
amount_xferred =
ahd_get_transfer_length(scb) - ahd_get_residual(scb);
if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MISC) != 0) {
ahd_print_path(ahd, scb);
printk("Set CAM_UNCOR_PARITY\n");
}
#endif
ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
#ifdef AHD_REPORT_UNDERFLOWS
/*
 * This code is disabled by default as some
 * clients of the SCSI system do not properly
 * initialize the underflow parameter.  This
 * results in spurious termination of commands
 * that complete as expected (e.g. underflow is
 * allowed as command can return variable amounts
 * of data.
 */
} else if (amount_xferred < scb->io_ctx->underflow) {
u_int i;
ahd_print_path(ahd, scb);
printk("CDB:");
for (i = 0; i < scb->io_ctx->cmd_len; i++)
printk(" 0x%x", scb->io_ctx->cmnd[i]);
printk("\n");
ahd_print_path(ahd, scb);
printk("Saw underflow (%ld of %ld bytes). "
"Treated as error\n",
ahd_get_residual(scb),
ahd_get_transfer_length(scb));
ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
#endif
} else {
ahd_set_transaction_status(scb, CAM_REQ_CMP);
}
} else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
ahd_linux_handle_scsi_status(ahd, cmd->device, scb);
}
/*
 * Credit a clean completion toward slowly restoring a queue depth
 * that was throttled after a previous queue-full event.
 */
if (dev->openings == 1
&& ahd_get_transaction_status(scb) == CAM_REQ_CMP
&& ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
dev->tag_success_count++;
/*
 * Some devices deal with temporary internal resource
 * shortages by returning queue full.  When the queue
 * full occurs, we throttle back.  Slowly try to get
 * back to our previous queue depth.
 */
if ((dev->openings + dev->active) < dev->maxtags
&& dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
dev->tag_success_count = 0;
dev->openings++;
}
if (dev->active == 0)
dev->commands_since_idle_or_otag = 0;
if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
printk("Recovery SCB completes\n");
if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
|| ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
/* Wake any error-recovery thread waiting on this SCB. */
if (ahd->platform_data->eh_done)
complete(ahd->platform_data->eh_done);
}
ahd_free_scb(ahd, scb);
ahd_linux_queue_cmd_complete(ahd, cmd);
}
/*
 * Post-process an SCB that completed with a non-OK SCSI status:
 * copy sense data into the mid-layer command for CHECK CONDITION,
 * and implement queue-depth throttling for QUEUE FULL.
 */
static void
ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
struct scsi_device *sdev, struct scb *scb)
{
struct ahd_devinfo devinfo;
struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
ahd_compile_devinfo(&devinfo,
ahd->our_id,
sdev->sdev_target->id, sdev->lun,
sdev->sdev_target->channel == 0 ? 'A' : 'B',
ROLE_INITIATOR);
/*
 * We don't currently trust the mid-layer to
 * properly deal with queue full or busy.  So,
 * when one occurs, we tell the mid-layer to
 * unconditionally requeue the command to us
 * so that we can retry it ourselves.  We also
 * implement our own throttling mechanism so
 * we don't clobber the device with too many
 * commands.
 */
switch (ahd_get_scsi_status(scb)) {
default:
break;
case SCSI_STATUS_CHECK_COND:
case SCSI_STATUS_CMD_TERMINATED:
{
struct scsi_cmnd *cmd;
/*
 * Copy sense information to the OS's cmd
 * structure if it is available.
 */
cmd = scb->io_ctx;
if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) {
struct scsi_status_iu_header *siu;
u_int sense_size;
u_int sense_offset;
if (scb->flags & SCB_SENSE) {
/* Traditional autosense: data starts at offset 0. */
sense_size = min(sizeof(struct scsi_sense_data)
- ahd_get_sense_residual(scb),
(u_long)SCSI_SENSE_BUFFERSIZE);
sense_offset = 0;
} else {
/*
 * Copy only the sense data into the provided
 * buffer.
 */
siu = (struct scsi_status_iu_header *)
scb->sense_data;
sense_size = min_t(size_t,
scsi_4btoul(siu->sense_length),
SCSI_SENSE_BUFFERSIZE);
sense_offset = SIU_SENSE_OFFSET(siu);
}
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->sense_buffer,
ahd_get_sense_buf(ahd, scb)
+ sense_offset, sense_size);
/* Tell the mid-layer valid sense data is present. */
cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHD_DEBUG
if (ahd_debug & AHD_SHOW_SENSE) {
int i;
printk("Copied %d bytes of sense data at %d:",
sense_size, sense_offset);
for (i = 0; i < sense_size; i++) {
if ((i & 0xF) == 0)
printk("\n");
printk("0x%x ", cmd->sense_buffer[i]);
}
printk("\n");
}
#endif
}
break;
}
case SCSI_STATUS_QUEUE_FULL:
/*
 * By the time the core driver has returned this
 * command, all other commands that were queued
 * to us but not the device have been returned.
 * This ensures that dev->active is equal to
 * the number of commands actually queued to
 * the device.
 */
dev->tag_success_count = 0;
if (dev->active != 0) {
/*
 * Drop our opening count to the number
 * of commands currently outstanding.
 */
dev->openings = 0;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
ahd_print_path(ahd, scb);
printk("Dropping tag count to %d\n",
dev->active);
}
#endif
if (dev->active == dev->tags_on_last_queuefull) {
dev->last_queuefull_same_count++;
/*
 * If we repeatedly see a queue full
 * at the same queue depth, this
 * device has a fixed number of tag
 * slots.  Lock in this tag depth
 * so we stop seeing queue fulls from
 * this device.
 */
if (dev->last_queuefull_same_count
== AHD_LOCK_TAGS_COUNT) {
dev->maxtags = dev->active;
ahd_print_path(ahd, scb);
printk("Locking max tag count at %d\n",
dev->active);
}
} else {
dev->tags_on_last_queuefull = dev->active;
dev->last_queuefull_same_count = 0;
}
/* Requeue internally; the mid-layer never sees QUEUE FULL. */
ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
ahd_set_scsi_status(scb, SCSI_STATUS_OK);
ahd_platform_set_tags(ahd, sdev, &devinfo,
(dev->flags & AHD_DEV_Q_BASIC)
? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
break;
}
/*
 * Drop down to a single opening, and treat this
 * as if the target returned BUSY SCSI status.
 */
dev->openings = 1;
ahd_platform_set_tags(ahd, sdev, &devinfo,
(dev->flags & AHD_DEV_Q_BASIC)
? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
}
}
/*
 * Translate the CAM transaction status recorded in the command into a
 * Linux DID_* host byte and hand the command back to the mid-layer.
 */
static void
ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
{
int status;
int new_status = DID_OK;
int do_fallback = 0;
int scsi_status;
/*
 * Map CAM error codes into Linux Error codes. We
 * avoid the conversion so that the DV code has the
 * full error information available when making
 * state change decisions.
 */
status = ahd_cmd_get_transaction_status(cmd);
switch (status) {
case CAM_REQ_INPROG:
case CAM_REQ_CMP:
new_status = DID_OK;
break;
case CAM_AUTOSENSE_FAIL:
new_status = DID_ERROR;
/* Fallthrough */
case CAM_SCSI_STATUS_ERROR:
scsi_status = ahd_cmd_get_scsi_status(cmd);
switch(scsi_status) {
case SCSI_STATUS_CMD_TERMINATED:
case SCSI_STATUS_CHECK_COND:
if ((cmd->result >> 24) != DRIVER_SENSE) {
do_fallback = 1;
} else {
struct scsi_sense_data *sense;
sense = (struct scsi_sense_data *)
cmd->sense_buffer;
/* ASC 0x47/0x48: parity / IU CRC errors reported
 * by the target -- treat as transfer problems. */
if (sense->extra_len >= 5 &&
(sense->add_sense_code == 0x47
|| sense->add_sense_code == 0x48))
do_fallback = 1;
}
break;
default:
break;
}
break;
case CAM_REQ_ABORTED:
new_status = DID_ABORT;
break;
case CAM_BUSY:
new_status = DID_BUS_BUSY;
break;
case CAM_REQ_INVALID:
case CAM_PATH_INVALID:
new_status = DID_BAD_TARGET;
break;
case CAM_SEL_TIMEOUT:
new_status = DID_NO_CONNECT;
break;
case CAM_SCSI_BUS_RESET:
case CAM_BDR_SENT:
new_status = DID_RESET;
break;
case CAM_UNCOR_PARITY:
new_status = DID_PARITY;
do_fallback = 1;
break;
case CAM_CMD_TIMEOUT:
new_status = DID_TIME_OUT;
do_fallback = 1;
break;
case CAM_REQ_CMP_ERR:
case CAM_UNEXP_BUSFREE:
case CAM_DATA_RUN_ERR:
new_status = DID_ERROR;
do_fallback = 1;
break;
case CAM_UA_ABORT:
case CAM_NO_HBA:
case CAM_SEQUENCE_FAIL:
case CAM_CCB_LEN_ERR:
case CAM_PROVIDE_FAIL:
case CAM_REQ_TERMIO:
case CAM_UNREC_HBA_ERROR:
case CAM_REQ_TOO_BIG:
new_status = DID_ERROR;
break;
case CAM_REQUEUE_REQ:
new_status = DID_REQUEUE;
break;
default:
/* We should never get here */
new_status = DID_ERROR;
break;
}
/* do_fallback marks transport-quality failures; in this code path
 * they are only logged, not acted upon. */
if (do_fallback) {
printk("%s: device overrun (status %x) on %d:%d:%d\n",
ahd_name(ahd), status, cmd->device->channel,
cmd->device->id, cmd->device->lun);
}
ahd_cmd_set_transaction_status(cmd, new_status);
cmd->scsi_done(cmd);
}
/* Stop the SCSI mid-layer from issuing new commands to this host. */
static void
ahd_freeze_simq(struct ahd_softc *ahd)
{
	struct Scsi_Host *shost = ahd->platform_data->host;

	scsi_block_requests(shost);
}
/* Allow the SCSI mid-layer to resume issuing commands to this host. */
static void
ahd_release_simq(struct ahd_softc *ahd)
{
	struct Scsi_Host *shost = ahd->platform_data->host;

	scsi_unblock_requests(shost);
}
/*
 * Attempt to abort a single command on behalf of the mid-layer's error
 * handler.  Returns SUCCESS or FAILED (eh handler semantics).  Depending
 * on where the command is found, it is either completed directly from
 * the qinfifo, aborted on the bus via ATN, or requeued as a task
 * management / MK_MESSAGE SCB, after which we sleep on eh_done until
 * ahd_done() signals completion or a 5 second timeout expires.
 */
static int
ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
{
struct ahd_softc *ahd;
struct ahd_linux_device *dev;
struct scb *pending_scb;
u_int saved_scbptr;
u_int active_scbptr;
u_int last_phase;
u_int saved_scsiid;
u_int cdb_byte;
int retval;
int was_paused;
int paused;
int wait;
int disconnected;
ahd_mode_state saved_modes;
unsigned long flags;
pending_scb = NULL;
paused = FALSE;
wait = FALSE;
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
scmd_printk(KERN_INFO, cmd,
"Attempting to queue an ABORT message:");
printk("CDB:");
for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
printk(" 0x%x", cmd->cmnd[cdb_byte]);
printk("\n");
ahd_lock(ahd, &flags);
/*
 * First determine if we currently own this command.
 * Start by searching the device queue.  If not found
 * there, check the pending_scb list.  If not found
 * at all, and the system wanted us to just abort the
 * command, return success.
 */
dev = scsi_transport_device_data(cmd->device);
if (dev == NULL) {
/*
 * No target device for this command exists,
 * so we must not still own the command.
 */
scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
retval = SUCCESS;
goto no_cmd;
}
/*
 * See if we can find a matching cmd in the pending list.
 */
LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
if (pending_scb->io_ctx == cmd)
break;
}
if (pending_scb == NULL) {
scmd_printk(KERN_INFO, cmd, "Command not found\n");
goto no_cmd;
}
if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
/*
 * We can't queue two recovery actions using the same SCB
 */
retval = FAILED;
goto done;
}
/*
 * Ensure that the card doesn't do anything
 * behind our back.  Also make sure that we
 * didn't "just" miss an interrupt that would
 * affect this cmd.
 */
was_paused = ahd_is_paused(ahd);
ahd_pause_and_flushwork(ahd);
paused = TRUE;
if ((pending_scb->flags & SCB_ACTIVE) == 0) {
scmd_printk(KERN_INFO, cmd, "Command already completed\n");
goto no_cmd;
}
printk("%s: At time of recovery, card was %spaused\n",
ahd_name(ahd), was_paused ? "" : "not ");
ahd_dump_card_state(ahd);
disconnected = TRUE;
/*
 * If the SCB is still waiting in the qinfifo it has never been
 * issued to the target, so completing it there aborts it cleanly.
 */
if (ahd_search_qinfifo(ahd, cmd->device->id,
cmd->device->channel + 'A',
cmd->device->lun,
pending_scb->hscb->tag,
ROLE_INITIATOR, CAM_REQ_ABORTED,
SEARCH_COMPLETE) > 0) {
printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
ahd_name(ahd), cmd->device->channel,
cmd->device->id, cmd->device->lun);
retval = SUCCESS;
goto done;
}
saved_modes = ahd_save_modes(ahd);
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
last_phase = ahd_inb(ahd, LASTPHASE);
saved_scbptr = ahd_get_scbptr(ahd);
active_scbptr = saved_scbptr;
/* If our SCB is the one currently on the bus, it isn't disconnected. */
if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
struct scb *bus_scb;
bus_scb = ahd_lookup_scb(ahd, active_scbptr);
if (bus_scb == pending_scb)
disconnected = FALSE;
}
/*
 * At this point, pending_scb is the scb associated with the
 * passed in command.  That command is currently active on the
 * bus or is in the disconnected state.
 */
saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
if (last_phase != P_BUSFREE
&& SCB_GET_TAG(pending_scb) == active_scbptr) {
/*
 * We're active on the bus, so assert ATN
 * and hope that the target responds.
 */
pending_scb = ahd_lookup_scb(ahd, active_scbptr);
pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
wait = TRUE;
} else if (disconnected) {
/*
 * Actually re-queue this SCB in an attempt
 * to select the device before it reconnects.
 */
pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
pending_scb->hscb->cdb_len = 0;
pending_scb->hscb->task_attribute = 0;
pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
/*
 * Mark the SCB has having an outstanding
 * task management function.  Should the command
 * complete normally before the task management
 * function can be sent, the host will be notified
 * to abort our requeued SCB.
 */
ahd_outb(ahd, SCB_TASK_MANAGEMENT,
pending_scb->hscb->task_management);
} else {
/*
 * If non-packetized, set the MK_MESSAGE control
 * bit indicating that we desire to send a message.
 * We also set the disconnected flag since there is
 * no guarantee that our SCB control byte matches
 * the version on the card.  We don't want the
 * sequencer to abort the command thinking an
 * unsolicited reselection occurred.
 */
pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
/*
 * The sequencer will never re-reference the
 * in-core SCB.  To make sure we are notified
 * during reselection, set the MK_MESSAGE flag in
 * the card's copy of the SCB.
 */
ahd_outb(ahd, SCB_CONTROL,
ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
}
/*
 * Clear out any entries in the QINFIFO first
 * so we are the next SCB for this target
 * to run.
 */
ahd_search_qinfifo(ahd, cmd->device->id,
cmd->device->channel + 'A', cmd->device->lun,
SCB_LIST_NULL, ROLE_INITIATOR,
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
ahd_qinfifo_requeue_tail(ahd, pending_scb);
ahd_set_scbptr(ahd, saved_scbptr);
ahd_print_path(ahd, pending_scb);
printk("Device is disconnected, re-queuing SCB\n");
wait = TRUE;
} else {
scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
retval = FAILED;
goto done;
}
no_cmd:
/*
 * Our assumption is that if we don't have the command, no
 * recovery action was required, so we return success.  Again,
 * the semantics of the mid-layer recovery engine are not
 * well defined, so this may change in time.
 * Note: the successful ATN/requeue paths above also fall
 * through to here, so retval defaults to SUCCESS for them.
 */
retval = SUCCESS;
done:
if (paused)
ahd_unpause(ahd);
if (wait) {
/* A recovery message was queued; wait for ahd_done() to
 * complete() it, or give up after five seconds. */
DECLARE_COMPLETION_ONSTACK(done);
ahd->platform_data->eh_done = &done;
ahd_unlock(ahd, &flags);
printk("%s: Recovery code sleeping\n", ahd_name(ahd));
if (!wait_for_completion_timeout(&done, 5 * HZ)) {
ahd_lock(ahd, &flags);
ahd->platform_data->eh_done = NULL;
ahd_unlock(ahd, &flags);
printk("%s: Timer Expired (active %d)\n",
ahd_name(ahd), dev->active);
retval = FAILED;
}
printk("Recovery code awake\n");
} else
ahd_unlock(ahd, &flags);
if (retval != SUCCESS)
printk("%s: Command abort returning 0x%x\n",
ahd_name(ahd), retval);
return retval;
}
static void ahd_linux_set_width(struct scsi_target *starget, int width)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_devinfo devinfo;
unsigned long flags;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_lock(ahd, &flags);
ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
/*
 * spi_transport hook: set the goal synchronous period factor for a
 * target (smaller factor == faster transfers).
 */
static void ahd_linux_set_period(struct scsi_target *starget, int period)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options;
unsigned int dt;
unsigned long flags;
unsigned long offset = tinfo->goal.offset;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: set period to %d\n", ahd_name(ahd), period);
#endif
/* A zero goal offset would keep transfers asynchronous; use the
 * maximum so the requested period can actually take effect. */
if (offset == 0)
offset = MAX_OFFSET;
/* Factor 8 is the smallest value accepted here. */
if (period < 8)
period = 8;
if (period < 10) {
/* Factors below 10 require DT, and DT requires a
 * wide-capable target; otherwise clamp to 10. */
if (spi_max_width(starget)) {
ppr_options |= MSG_EXT_PPR_DT_REQ;
if (period == 8)
ppr_options |= MSG_EXT_PPR_IU_REQ;
} else
period = 10;
}
dt = ppr_options & MSG_EXT_PPR_DT_REQ;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
/* all PPR requests apart from QAS require wide transfers */
if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
if (spi_width(starget) == 0)
ppr_options &= MSG_EXT_PPR_QAS_REQ;
}
/* May adjust period/ppr_options to the closest supported rate. */
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
/*
 * spi_transport hook: set the goal synchronous offset for a target.
 * An offset of 0 requests asynchronous transfers.
 */
static void ahd_linux_set_offset(struct scsi_target *starget, int offset)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = 0;
unsigned int period = 0;
/* NOTE(review): dt is evaluated while ppr_options is still 0, so it
 * is always 0 even after ppr_options is reloaded from the goal below;
 * the syncrate lookup therefore always uses AHD_SYNCRATE_ULTRA2.
 * The sibling setters compute dt after updating ppr_options --
 * confirm whether this is intentional before changing it. */
unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
unsigned long flags;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: set offset to %d\n", ahd_name(ahd), offset);
#endif
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
if (offset != 0) {
/* Keep the current goal period/options; only offset changes. */
period = tinfo->goal.period;
ppr_options = tinfo->goal.ppr_options;
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
}
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, offset, ppr_options,
AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
/*
 * spi_transport hook: enable or disable Dual Transition (DT) clocking
 * in the negotiation goal.
 */
static void ahd_linux_set_dt(struct scsi_target *starget, int dt)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_DT_REQ;
unsigned int period = tinfo->goal.period;
unsigned int width = tinfo->goal.width;
unsigned long flags;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: %s DT\n", ahd_name(ahd),
dt ? "enabling" : "disabling");
#endif
if (dt && spi_max_width(starget)) {
ppr_options |= MSG_EXT_PPR_DT_REQ;
/* DT requires a wide bus; widen the goal if necessary. */
if (!width)
ahd_linux_set_width(starget, 1);
} else {
if (period <= 9)
period = 10; /* If resetting DT, period must be >= 25ns */
/* IU is invalid without DT set */
ppr_options &= ~MSG_EXT_PPR_IU_REQ;
}
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
static void ahd_linux_set_qas(struct scsi_target *starget, int qas)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_QAS_REQ;
unsigned int period = tinfo->goal.period;
unsigned int dt;
unsigned long flags;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: %s QAS\n", ahd_name(ahd),
qas ? "enabling" : "disabling");
#endif
if (qas) {
ppr_options |= MSG_EXT_PPR_QAS_REQ;
}
dt = ppr_options & MSG_EXT_PPR_DT_REQ;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
static void ahd_linux_set_iu(struct scsi_target *starget, int iu)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_IU_REQ;
unsigned int period = tinfo->goal.period;
unsigned int dt;
unsigned long flags;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: %s IU\n", ahd_name(ahd),
iu ? "enabling" : "disabling");
#endif
if (iu && spi_max_width(starget)) {
ppr_options |= MSG_EXT_PPR_IU_REQ;
ppr_options |= MSG_EXT_PPR_DT_REQ; /* IU requires DT */
}
dt = ppr_options & MSG_EXT_PPR_DT_REQ;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_RD_STRM;
unsigned int period = tinfo->goal.period;
unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
unsigned long flags;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: %s Read Streaming\n", ahd_name(ahd),
rdstrm ? "enabling" : "disabling");
#endif
if (rdstrm && spi_max_width(starget))
ppr_options |= MSG_EXT_PPR_RD_STRM;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_WR_FLOW;
unsigned int period = tinfo->goal.period;
unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
unsigned long flags;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: %s Write Flow Control\n", ahd_name(ahd),
wrflow ? "enabling" : "disabling");
#endif
if (wrflow && spi_max_width(starget))
ppr_options |= MSG_EXT_PPR_WR_FLOW;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
/*
 * spi_transport hook: toggle Retain Training Information (RTI) in the
 * negotiation goal.  Silently ignored on controllers without RTI.
 */
static void ahd_linux_set_rti(struct scsi_target *starget, int rti)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_RTI;
unsigned int period = tinfo->goal.period;
unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
unsigned long flags;
/* Controller feature check: bail out early if RTI is unsupported. */
if ((ahd->features & AHD_RTI) == 0) {
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: RTI not available\n", ahd_name(ahd));
#endif
return;
}
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: %s RTI\n", ahd_name(ahd),
rti ? "enabling" : "disabling");
#endif
if (rti && spi_max_width(starget))
ppr_options |= MSG_EXT_PPR_RTI;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
/*
 * spi_transport hook: enable or disable transmitter precompensation.
 * Besides updating the PPR goal, this programs the IO cell's precomp
 * level directly (from the per-unit tuning table when available,
 * otherwise the driver default).
 */
static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_PCOMP_EN;
unsigned int period = tinfo->goal.period;
unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
unsigned long flags;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_DV) != 0)
printk("%s: %s Precompensation\n", ahd_name(ahd),
pcomp ? "Enable" : "Disable");
#endif
if (pcomp && spi_max_width(starget)) {
uint8_t precomp;
/* Use the board-specific precomp level when we have one. */
if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
const struct ahd_linux_iocell_opts *iocell_opts;
iocell_opts = &aic79xx_iocell_info[ahd->unit];
precomp = iocell_opts->precomp;
} else {
precomp = AIC79XX_DEFAULT_PRECOMP;
}
ppr_options |= MSG_EXT_PPR_PCOMP_EN;
AHD_SET_PRECOMP(ahd, precomp);
} else {
AHD_SET_PRECOMP(ahd, 0);
}
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
static void ahd_linux_set_hold_mcs(struct scsi_target *starget, int hold)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahd_softc *ahd = *((struct ahd_softc **)shost->hostdata);
struct ahd_tmode_tstate *tstate;
struct ahd_initiator_tinfo *tinfo
= ahd_fetch_transinfo(ahd,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
struct ahd_devinfo devinfo;
unsigned int ppr_options = tinfo->goal.ppr_options
& ~MSG_EXT_PPR_HOLD_MCS;
unsigned int period = tinfo->goal.period;
unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
unsigned long flags;
if (hold && spi_max_width(starget))
ppr_options |= MSG_EXT_PPR_HOLD_MCS;
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
ahd_find_syncrate(ahd, &period, &ppr_options,
dt ? AHD_SYNCRATE_MAX : AHD_SYNCRATE_ULTRA2);
ahd_lock(ahd, &flags);
ahd_set_syncrate(ahd, &devinfo, period, tinfo->goal.offset,
ppr_options, AHD_TRANS_GOAL, FALSE);
ahd_unlock(ahd, &flags);
}
/* spi_transport hook: report the bus signalling mode (LVD/SE/unknown). */
static void ahd_linux_get_signalling(struct Scsi_Host *shost)
{
	struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
	unsigned long lock_flags;
	u8 sblkctl;

	/* Briefly pause the sequencer so SBLKCTL can be sampled safely. */
	ahd_lock(ahd, &lock_flags);
	ahd_pause(ahd);
	sblkctl = ahd_inb(ahd, SBLKCTL);
	ahd_unpause(ahd);
	ahd_unlock(ahd, &lock_flags);

	if (sblkctl & ENAB40)
		spi_signalling(shost) = SPI_SIGNAL_LVD;
	else if (sblkctl & ENAB20)
		spi_signalling(shost) = SPI_SIGNAL_SE;
	else
		spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
}
/*
 * SPI transport glue: per-target transfer tunables exposed through the
 * scsi_transport_spi sysfs interface, each backed by a setter above.
 */
static struct spi_function_template ahd_linux_transport_functions = {
.set_offset = ahd_linux_set_offset,
.show_offset = 1,
.set_period = ahd_linux_set_period,
.show_period = 1,
.set_width = ahd_linux_set_width,
.show_width = 1,
.set_dt = ahd_linux_set_dt,
.show_dt = 1,
.set_iu = ahd_linux_set_iu,
.show_iu = 1,
.set_qas = ahd_linux_set_qas,
.show_qas = 1,
.set_rd_strm = ahd_linux_set_rd_strm,
.show_rd_strm = 1,
.set_wr_flow = ahd_linux_set_wr_flow,
.show_wr_flow = 1,
.set_rti = ahd_linux_set_rti,
.show_rti = 1,
.set_pcomp_en = ahd_linux_set_pcomp_en,
.show_pcomp_en = 1,
.set_hold_mcs = ahd_linux_set_hold_mcs,
.show_hold_mcs = 1,
.get_signalling = ahd_linux_get_signalling,
};
/* Module entry point: apply boot options, register the SPI transport,
 * then probe for PCI controllers. */
static int __init
ahd_linux_init(void)
{
	int ret;

	/*
	 * If we've been passed any parameters, process them now.
	 */
	if (aic79xx)
		aic79xx_setup(aic79xx);

	ahd_linux_transport_template =
		spi_attach_transport(&ahd_linux_transport_functions);
	if (ahd_linux_transport_template == NULL)
		return -ENODEV;

	scsi_transport_reserve_device(ahd_linux_transport_template,
				      sizeof(struct ahd_linux_device));

	ret = ahd_linux_pci_init();
	if (ret != 0)
		spi_release_transport(ahd_linux_transport_template);
	return ret;
}
/*
 * Module exit: unregister the PCI driver first so no new hosts appear,
 * then drop the SPI transport registration taken at module init.
 */
static void __exit
ahd_linux_exit(void)
{
ahd_linux_pci_exit();
spi_release_transport(ahd_linux_transport_template);
}
module_init(ahd_linux_init);
module_exit(ahd_linux_exit);
| gpl-2.0 |
vickyvca/MindEater-dior | drivers/scsi/aic7xxx/aic7xxx_osm.c | 5134 | 73156 | /*
* Adaptec AIC7xxx device driver for Linux.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $
*
* Copyright (c) 1994 John Aycock
* The University of Calgary Department of Computer Science.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
* driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
* config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
* the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
* the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
* (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
* the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
* ANSI SCSI-2 specification (draft 10c), ...
*
* --------------------------------------------------------------------------
*
* Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
*
* Substantially modified to include support for wide and twin bus
* adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
* SCB paging, and other rework of the code.
*
* --------------------------------------------------------------------------
* Copyright (c) 1994-2000 Justin T. Gibbs.
* Copyright (c) 2000-2001 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
*---------------------------------------------------------------------------
*
* Thanks also go to (in alphabetical order) the following:
*
* Rory Bolt - Sequencer bug fixes
* Jay Estabrook - Initial DEC Alpha support
* Doug Ledford - Much needed abort/reset bug fixes
* Kai Makisara - DMAing of SCBs
*
* A Boot time option was also added for not resetting the scsi bus.
*
* Form: aic7xxx=extended
* aic7xxx=no_reset
* aic7xxx=verbose
*
* Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
*
* Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp
*/
/*
* Further driver modifications made by Doug Ledford <dledford@redhat.com>
*
* Copyright (c) 1997-1999 Doug Ledford
*
* These changes are released under the same licensing terms as the FreeBSD
* driver written by Justin Gibbs. Please see his Copyright notice above
* for the exact terms and conditions covering my changes as well as the
* warranty statement.
*
* Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
* but are not limited to:
*
* 1: Import of the latest FreeBSD sequencer code for this driver
* 2: Modification of kernel code to accommodate different sequencer semantics
* 3: Extensive changes throughout kernel portion of driver to improve
* abort/reset processing and error handling
* 4: Other work contributed by various people on the Internet
* 5: Changes to printk information and verbosity selection code
* 6: General reliability related changes, especially in IRQ management
* 7: Modifications to the default probe/attach order for supported cards
* 8: SMP friendliness has been improved
*
*/
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include <scsi/scsicam.h>
static struct scsi_transport_template *ahc_linux_transport_template = NULL;
#include <linux/init.h> /* __setup */
#include <linux/mm.h> /* For fetching system memory size */
#include <linux/blkdev.h> /* For block_size() */
#include <linux/delay.h> /* For ssleep/msleep */
#include <linux/slab.h>
/*
* Set this to the delay in seconds after SCSI bus reset.
* Note, we honor this only for the initial bus reset.
* The scsi error recovery code performs its own bus settle
* delay handling for error recovery actions.
*/
#ifdef CONFIG_AIC7XXX_RESET_DELAY_MS
#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS
#else
#define AIC7XXX_RESET_DELAY 5000
#endif
/*
* Control collection of SCSI transfer statistics for the /proc filesystem.
*
* NOTE: Do NOT enable this when running on kernels version 1.2.x and below.
* NOTE: This does affect performance since it has to maintain statistics.
*/
#ifdef CONFIG_AIC7XXX_PROC_STATS
#define AIC7XXX_PROC_STATS
#endif
/*
* To change the default number of tagged transactions allowed per-device,
* add a line to the lilo.conf file like:
* append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
* which will result in the first four devices on the first two
* controllers being set to a tagged queue depth of 32.
*
* The tag_commands is an array of 16 to allow for wide and twin adapters.
* Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
* for channel 1.
*/
/* Per-adapter table of tagged-queue depths, indexed by target ID. */
typedef struct {
	uint8_t tag_commands[16];	/* Allow for wide/twin adapters. */
} adapter_tag_info_t;
/*
* Modify this as you see fit for your system.
*
* 0 tagged queuing disabled
* 1 <= n <= 253 n == max tags ever dispatched.
*
* The driver will throttle the number of commands dispatched to a
* device if it returns queue full. For devices with a fixed maximum
* queue depth, the driver will eventually determine this depth and
* lock it in (a console message is printed to indicate that a lock
* has occurred). On some devices, queue full is returned for a temporary
* resource shortage. These devices will return queue full at varying
* depths. The driver will throttle back when the queue fulls occur and
* attempt to slowly increase the depth over time as the device recovers
* from the resource shortage.
*
* In this example, the first line will disable tagged queueing for all
* the devices on the first probed aic7xxx adapter.
*
* The second line enables tagged queueing with 4 commands/LUN for IDs
* (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
* driver to attempt to use up to 64 tags for ID 1.
*
* The third line is the same as the first line.
*
* The fourth line disables tagged queueing for devices 0 and 3. It
* enables tagged queueing for the other IDs, with 16 commands/LUN
* for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
* IDs 2, 5-7, and 9-15.
*/
/*
* NOTE: The below structure is for reference only, the actual structure
* to modify in order to change things is just below this comment block.
adapter_tag_info_t aic7xxx_tag_info[] =
{
{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
{{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
};
*/
#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
#else
#define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE
#endif
#define AIC7XXX_CONFIGED_TAG_COMMANDS { \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE, \
AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE \
}
/*
 * By default, use the number of commands specified by
 * the user's kernel configuration.
 */
/*
 * One entry per probed controller (up to 16); each entry holds the per-target
 * tag depth, seeded from the kernel configuration and optionally overridden
 * via the "tag_info"/"global_tag_depth" boot/module options.
 */
static adapter_tag_info_t aic7xxx_tag_info[] =
{
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS},
	{AIC7XXX_CONFIGED_TAG_COMMANDS}
};
/*
* There should be a specific return value for this in scsi.h, but
* it seems that most drivers ignore it.
*/
#define DID_UNDERFLOW DID_ERROR
/*
 * Print a "(scsiH:C:T:L): " prefix identifying the path of @scb for
 * diagnostic messages.  A NULL @scb prints wildcard values ('X'/-1).
 */
void
ahc_print_path(struct ahc_softc *ahc, struct scb *scb)
{
	printk("(scsi%d:%c:%d:%d): ",
	       ahc->platform_data->host->host_no,
	       scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X',
	       scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1,
	       scb != NULL ? SCB_GET_LUN(scb) : -1);
}
/*
* XXX - these options apply unilaterally to _all_ 274x/284x/294x
* cards in the system. This should be fixed. Exceptions to this
* rule are noted in the comments.
*/
/*
* Skip the scsi bus reset. Non 0 make us skip the reset at startup. This
* has no effect on any later resets that might occur due to things like
* SCSI bus timeouts.
*/
static uint32_t aic7xxx_no_reset;
/*
* Should we force EXTENDED translation on a controller.
* 0 == Use whatever is in the SEEPROM or default to off
* 1 == Use whatever is in the SEEPROM or default to on
*/
static uint32_t aic7xxx_extended;
/*
* PCI bus parity checking of the Adaptec controllers. This is somewhat
* dubious at best. To my knowledge, this option has never actually
* solved a PCI parity problem, but on certain machines with broken PCI
* chipset configurations where stray PCI transactions with bad parity are
* the norm rather than the exception, the error messages can be overwhelming.
* It's included in the driver for completeness.
* 0 = Shut off PCI parity check
* non-0 = reverse polarity pci parity checking
*/
static uint32_t aic7xxx_pci_parity = ~0;
/*
* There are lots of broken chipsets in the world. Some of them will
* violate the PCI spec when we issue byte sized memory writes to our
* controller. I/O mapped register access, if allowed by the given
* platform, will work in almost all cases.
*/
uint32_t aic7xxx_allow_memio = ~0;
/*
* So that we can set how long each device is given as a selection timeout.
* The table of values goes like this:
* 0 - 256ms
* 1 - 128ms
* 2 - 64ms
* 3 - 32ms
* We default to 256ms because some older devices need a longer time
* to respond to initial selection.
*/
static uint32_t aic7xxx_seltime;
/*
* Certain devices do not perform any aging on commands. Should the
* device be saturated by commands in one portion of the disk, it is
* possible for transactions on far away sectors to never be serviced.
* To handle these devices, we can periodically send an ordered tag to
* force all outstanding transactions to be serviced prior to a new
* transaction.
*/
static uint32_t aic7xxx_periodic_otag;
/*
* Module information and settable options.
*/
static char *aic7xxx = NULL;
MODULE_AUTHOR("Maintainer: Hannes Reinecke <hare@suse.de>");
MODULE_DESCRIPTION("Adaptec AIC77XX/78XX SCSI Host Bus Adapter driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
module_param(aic7xxx, charp, 0444);
MODULE_PARM_DESC(aic7xxx,
"period-delimited options string:\n"
" verbose Enable verbose/diagnostic logging\n"
" allow_memio Allow device registers to be memory mapped\n"
" debug Bitmask of debug values to enable\n"
" no_probe Toggle EISA/VLB controller probing\n"
" probe_eisa_vl Toggle EISA/VLB controller probing\n"
" no_reset Suppress initial bus resets\n"
" extended Enable extended geometry on all controllers\n"
" periodic_otag Send an ordered tagged transaction\n"
" periodically to prevent tag starvation.\n"
" This may be required by some older disk\n"
" drives or RAID arrays.\n"
" tag_info:<tag_str> Set per-target tag depth\n"
" global_tag_depth:<int> Global tag depth for every target\n"
" on every bus\n"
" seltime:<int> Selection Timeout\n"
" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
"\n"
" Sample modprobe configuration file:\n"
" # Toggle EISA/VLB probing\n"
" # Set tag depth on Controller 1/Target 1 to 10 tags\n"
" # Shorten the selection timeout to 128ms\n"
"\n"
" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
);
static void ahc_linux_handle_scsi_status(struct ahc_softc *,
struct scsi_device *,
struct scb *);
static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
struct scsi_cmnd *cmd);
static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
static void ahc_linux_release_simq(struct ahc_softc *ahc);
static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo);
static void ahc_linux_device_queue_depth(struct scsi_device *);
static int ahc_linux_run_command(struct ahc_softc*,
struct ahc_linux_device *,
struct scsi_cmnd *);
static void ahc_linux_setup_tag_info_global(char *p);
static int aic7xxx_setup(char *s);
static int ahc_linux_unit;
/************************** OS Utility Wrappers *******************************/
/*
 * Busy-wait for @usec microseconds.
 *
 * udelay on Linux can have problems for multi-millisecond waits, so
 * wait at most 1024us per call.
 *
 * Fix: the previous code called udelay(usec % 1024), which delays 0us
 * whenever the remaining time is an exact multiple of 1024 and
 * undershoots every wait longer than 1024us (e.g. 1500us became
 * 476 + 476 = 952us).  Clamp each chunk to 1024us instead.
 */
void
ahc_delay(long usec)
{
	while (usec > 0) {
		udelay(usec > 1024 ? 1024 : usec);
		usec -= 1024;
	}
}
/***************************** Low Level I/O **********************************/
/*
 * Read one byte from controller register @port, using memory-mapped or
 * port I/O depending on how the adapter was mapped.
 */
uint8_t
ahc_inb(struct ahc_softc * ahc, long port)
{
	uint8_t val;

	if (ahc->tag == BUS_SPACE_MEMIO)
		val = readb(ahc->bsh.maddr + port);
	else
		val = inb(ahc->bsh.ioport + port);
	mb();
	return (val);
}
/*
 * Write one byte to controller register @port, using memory-mapped or
 * port I/O depending on how the adapter was mapped.
 */
void
ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
{
	if (ahc->tag == BUS_SPACE_MEMIO)
		writeb(val, ahc->bsh.maddr + port);
	else
		outb(val, ahc->bsh.ioport + port);
	mb();
}
/*
 * Write @count bytes from @array to the single register @port.
 *
 * There is probably a more efficient way to do this on Linux
 * but we don't use this for anything speed critical and this
 * should work.
 */
void
ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
{
	while (count-- > 0)
		ahc_outb(ahc, port, *array++);
}
/*
 * Read @count bytes from the single register @port into @array.
 *
 * There is probably a more efficient way to do this on Linux
 * but we don't use this for anything speed critical and this
 * should work.
 */
void
ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
{
	while (count-- > 0)
		*array++ = ahc_inb(ahc, port);
}
/********************************* Inlines ************************************/
static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
struct ahc_dma_seg *sg,
dma_addr_t addr, bus_size_t len);
/*
 * Tear down the DMA mappings for a completed SCB: sync the S/G list for
 * the CPU and release the midlayer's scatter/gather mapping.
 */
static void
ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
	scsi_dma_unmap(scb->io_ctx);
}
/*
 * Fill one hardware S/G element with the given DMA address/length and
 * account the transfer length.  Returns the number of S/G slots consumed
 * (always 1).  Panics if the SCB is already out of S/G slots.
 */
static int
ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
		  struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
{
	int consumed;

	if ((scb->sg_count + 1) > AHC_NSEG)
		panic("Too few segs for dma mapping. "
		      "Increase AHC_NSEG\n");

	consumed = 1;
	/* Low 32 bits of the bus address go in the addr word. */
	sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
	scb->platform_data->xfer_len += len;

	/*
	 * With 39-bit addressing, the upper address bits are folded into
	 * the high bits of the length word (addr >> 8 masked by
	 * AHC_SG_HIGH_ADDR_MASK).
	 */
	if (sizeof(dma_addr_t) > 4
	 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0)
		len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK;

	sg->len = ahc_htole32(len);
	return (consumed);
}
/*
* Return a string describing the driver.
*/
/*
 * Return a string describing the driver.
 *
 * NOTE(review): the returned pointer refers to a static buffer, so
 * concurrent callers would race — presumably the midlayer serializes
 * calls to .info; confirm before relying on it.
 */
static const char *
ahc_linux_info(struct Scsi_Host *host)
{
	static char buffer[512];
	char ahc_info[256];
	char *bp;
	struct ahc_softc *ahc;

	bp = &buffer[0];
	ahc = *(struct ahc_softc **)host->hostdata;
	memset(bp, 0, sizeof(buffer));
	strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
			" <");
	strcat(bp, ahc->description);
	strcat(bp, ">\n"
			" ");
	/* Core library fills in chip/feature details. */
	ahc_controller_info(ahc, ahc_info);
	strcat(bp, ahc_info);
	strcat(bp, "\n");

	return (bp);
}
/*
* Queue an SCB to the controller.
*/
/*
 * Queue an SCB to the controller.
 *
 * Returns 0 on successful submission or SCSI_MLQUEUE_HOST_BUSY when the
 * controller queue is frozen, prompting the midlayer to retry later.
 */
static int
ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
{
	struct ahc_softc *ahc;
	struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
	int rtn = SCSI_MLQUEUE_HOST_BUSY;
	unsigned long flags;

	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;

	ahc_lock(ahc, &flags);
	/* Only submit while the controller queue is not frozen. */
	if (ahc->platform_data->qfrozen == 0) {
		cmd->scsi_done = scsi_done;
		cmd->result = CAM_REQ_INPROG << 16;
		rtn = ahc_linux_run_command(ahc, dev, cmd);
	}
	ahc_unlock(ahc, &flags);

	return rtn;
}
static DEF_SCSI_QCMD(ahc_linux_queue)
/*
 * Locate the softc's cached scsi_target slot for @starget.  Channel B
 * targets occupy offsets 8-15 of the starget array.
 */
static inline struct scsi_target **
ahc_linux_target_in_softc(struct scsi_target *starget)
{
	struct ahc_softc *ahc =
		*((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata);
	unsigned int offset = starget->id;

	if (starget->channel != 0)
		offset += 8;
	return &ahc->platform_data->starget[offset];
}
/*
 * Midlayer target allocation hook: cache the target pointer in the softc,
 * derive the initial transfer-negotiation limits from the SEEPROM (if one
 * was read), and reset the negotiation goal to async/narrow so the first
 * command renegotiates.  Always returns 0.
 */
static int
ahc_linux_target_alloc(struct scsi_target *starget)
{
	struct ahc_softc *ahc =
		*((struct ahc_softc **)dev_to_shost(&starget->dev)->hostdata);
	struct seeprom_config *sc = ahc->seep_config;
	unsigned long flags;
	struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget);
	unsigned short scsirate;
	struct ahc_devinfo devinfo;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	char channel = starget->channel + 'A';
	unsigned int our_id = ahc->our_id;
	unsigned int target_offset;

	/* Channel B targets live at offsets 8-15 in per-target tables. */
	target_offset = starget->id;
	if (starget->channel != 0)
		target_offset += 8;

	if (starget->channel)
		our_id = ahc->our_id_b;

	ahc_lock(ahc, &flags);

	BUG_ON(*ahc_targp != NULL);

	*ahc_targp = starget;

	if (sc) {
		int maxsync = AHC_SYNCRATE_DT;
		int ultra = 0;
		/* shadows the outer 'flags' (irq flags) with SEEPROM bits */
		int flags = sc->device_flags[target_offset];

		if (ahc->flags & AHC_NEWEEPROM_FMT) {
			if (flags & CFSYNCHISULTRA)
				ultra = 1;
		} else if (flags & CFULTRAEN)
			ultra = 1;
		/* AIC nutcase; 10MHz appears as ultra = 1, CFXFER = 0x04
		 * change it to ultra=0, CFXFER = 0 */
		if(ultra && (flags & CFXFER) == 0x04) {
			ultra = 0;
			flags &= ~CFXFER;
		}

		if ((ahc->features & AHC_ULTRA2) != 0) {
			scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0);
		} else {
			scsirate = (flags & CFXFER) << 4;
			maxsync = ultra ? AHC_SYNCRATE_ULTRA :
				AHC_SYNCRATE_FAST;
		}
		spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
		if (!(flags & CFSYNCH))
			spi_max_offset(starget) = 0;
		spi_min_period(starget) =
			ahc_find_period(ahc, scsirate, maxsync);

		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id,
					    starget->id, &tstate);
	}
	/* Force the first command to renegotiate from async/narrow. */
	ahc_compile_devinfo(&devinfo, our_id, starget->id,
			    CAM_LUN_WILDCARD, channel,
			    ROLE_INITIATOR);
	ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
			 AHC_TRANS_GOAL, /*paused*/FALSE);
	ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_GOAL, /*paused*/FALSE);
	ahc_unlock(ahc, &flags);

	return 0;
}
static void
ahc_linux_target_destroy(struct scsi_target *starget)
{
struct scsi_target **ahc_targp = ahc_linux_target_in_softc(starget);
*ahc_targp = NULL;
}
/*
 * Midlayer device allocation hook: initialize the per-device state to a
 * single untagged opening; tag depth is configured later once we know
 * the device supports tagged queueing.  Always returns 0.
 */
static int
ahc_linux_slave_alloc(struct scsi_device *sdev)
{
	struct ahc_softc *ahc =
		*((struct ahc_softc **)sdev->host->hostdata);
	struct scsi_target *starget = sdev->sdev_target;
	struct ahc_linux_device *dev;

	if (bootverbose)
		printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id);

	dev = scsi_transport_device_data(sdev);
	memset(dev, 0, sizeof(*dev));

	/*
	 * We start out life using untagged
	 * transactions of which we allow one.
	 */
	dev->openings = 1;

	/*
	 * Set maxtags to 0. This will be changed if we
	 * later determine that we are dealing with
	 * a tagged queuing capable device.
	 */
	dev->maxtags = 0;

	spi_period(starget) = 0;

	return 0;
}
/*
 * Midlayer device configuration hook: set the queue depth for the device
 * and kick off initial domain validation if it has not run yet.
 * Always returns 0.
 */
static int
ahc_linux_slave_configure(struct scsi_device *sdev)
{
	struct ahc_softc *ahc;

	ahc = *((struct ahc_softc **)sdev->host->hostdata);

	if (bootverbose)
		sdev_printk(KERN_INFO, sdev, "Slave Configure\n");

	ahc_linux_device_queue_depth(sdev);

	/* Initial Domain Validation */
	if (!spi_initial_dv(sdev->sdev_target))
		spi_dv_device(sdev);

	return 0;
}
#if defined(__i386__)
/*
 * Return the disk geometry for the given SCSI device.
 *
 * Prefers the geometry recorded in the on-disk partition table; otherwise
 * synthesizes 64 heads / 32 sectors, switching to 255/63 ("extended
 * translation") for large disks when enabled globally or per-channel.
 * geom[] is filled as {heads, sectors, cylinders}.
 */
static int
ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		    sector_t capacity, int geom[])
{
	uint8_t *bh;
	int	 heads;
	int	 sectors;
	int	 cylinders;
	int	 ret;
	int	 extended;
	struct	 ahc_softc *ahc;
	u_int	 channel;

	ahc = *((struct ahc_softc **)sdev->host->hostdata);
	channel = sdev_channel(sdev);

	/* Try the geometry implied by the partition table first. */
	bh = scsi_bios_ptable(bdev);
	if (bh) {
		ret = scsi_partsize(bh, capacity,
				    &geom[2], &geom[0], &geom[1]);
		kfree(bh);
		if (ret != -1)
			return (ret);
	}
	heads = 64;
	sectors = 32;
	cylinders = aic_sector_div(capacity, heads, sectors);

	if (aic7xxx_extended != 0)
		extended = 1;
	else if (channel == 0)
		extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0;
	else
		extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0;
	/* Large disk: use extended translation to keep cylinders < 1024. */
	if (extended && cylinders >= 1024) {
		heads = 255;
		sectors = 63;
		cylinders = aic_sector_div(capacity, heads, sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return (0);
}
#endif
/*
* Abort the current SCSI command(s).
*/
/*
 * Abort the current SCSI command(s).
 */
static int
ahc_linux_abort(struct scsi_cmnd *cmd)
{
	int ret = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);

	if (ret != 0)
		printk("aic7xxx_abort returns 0x%x\n", ret);
	return (ret);
}
/*
* Attempt to send a target reset message to the device that timed out.
*/
/*
 * Attempt to send a target reset message to the device that timed out.
 */
static int
ahc_linux_dev_reset(struct scsi_cmnd *cmd)
{
	int ret = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);

	if (ret != 0)
		printk("aic7xxx_dev_reset returns 0x%x\n", ret);
	return (ret);
}
/*
* Reset the SCSI bus.
*/
/*
 * Reset the SCSI bus the failed command was issued on.  Always reports
 * SUCCESS to the midlayer; the number of aborted SCBs is only logged.
 */
static int
ahc_linux_bus_reset(struct scsi_cmnd *cmd)
{
	struct ahc_softc *ahc;
	int    found;
	unsigned long flags;

	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;

	ahc_lock(ahc, &flags);
	found = ahc_reset_channel(ahc, scmd_channel(cmd) + 'A',
				  /*initiate reset*/TRUE);
	ahc_unlock(ahc, &flags);

	if (bootverbose)
		printk("%s: SCSI bus reset delivered. "
		       "%d SCBs aborted.\n", ahc_name(ahc), found);

	return SUCCESS;
}
/* SCSI midlayer host template: entry points and queueing limits. */
struct scsi_host_template aic7xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= "aic7xxx",
	.proc_name		= "aic7xxx",
	.proc_info		= ahc_linux_proc_info,
	.info			= ahc_linux_info,
	.queuecommand		= ahc_linux_queue,
	.eh_abort_handler	= ahc_linux_abort,
	.eh_device_reset_handler = ahc_linux_dev_reset,
	.eh_bus_reset_handler	= ahc_linux_bus_reset,
#if defined(__i386__)
	.bios_param		= ahc_linux_biosparam,
#endif
	.can_queue		= AHC_MAX_QUEUE,
	.this_id		= -1,
	.max_sectors		= 8192,
	.cmd_per_lun		= 2,
	.use_clustering		= ENABLE_CLUSTERING,
	.slave_alloc		= ahc_linux_slave_alloc,
	.slave_configure	= ahc_linux_slave_configure,
	.target_alloc		= ahc_linux_target_alloc,
	.target_destroy		= ahc_linux_target_destroy,
};
/**************************** Tasklet Handler *********************************/
/******************************** Macros **************************************/
/*
 * Compose the hardware SCSIID byte for a command: target id in the TID
 * field, our initiator id for the command's channel, and the twin-channel
 * B flag for channel-1 commands.
 */
#define BUILD_SCSIID(ahc, cmd)						    \
	((((cmd)->device->id << TID_SHIFT) & TID)			    \
	| (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
	| (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
/******************************** Bus DMA *************************************/
/*
 * Allocate a DMA tag recording the alignment/boundary/size constraints
 * used by later ahc_dmamem_alloc() calls.  Returns 0 on success or
 * ENOMEM (positive, FreeBSD convention) on allocation failure.
 *
 * Linux is very simplistic about DMA memory.  For now don't maintain
 * all specification information.  Once Linux supplies better facilities
 * for doing these operations, or the needs of this particular driver
 * change, we might need to do more here.
 */
int
ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
		   bus_size_t alignment, bus_size_t boundary,
		   dma_addr_t lowaddr, dma_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
{
	bus_dma_tag_t tag = kmalloc(sizeof(*tag), GFP_ATOMIC);

	if (tag == NULL)
		return (ENOMEM);

	tag->alignment = alignment;
	tag->boundary = boundary;
	tag->maxsize = maxsize;
	*ret_tag = tag;
	return (0);
}
/* Release a DMA tag created by ahc_dma_tag_create(). */
void
ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
{
	kfree(dmat);
}
/*
 * Allocate coherent DMA memory of the tag's maxsize.  The bus address is
 * returned through *mapp (the "map" IS the dma handle in this port).
 * Returns 0 on success or ENOMEM (positive) on failure.
 */
int
ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
		 int flags, bus_dmamap_t *mapp)
{
	*vaddr = pci_alloc_consistent(ahc->dev_softc,
				      dmat->maxsize, mapp);
	if (*vaddr == NULL)
		return ENOMEM;
	return 0;
}
/* Free coherent DMA memory obtained from ahc_dmamem_alloc(). */
void
ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
		void* vaddr, bus_dmamap_t map)
{
	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
			    vaddr, map);
}
/*
 * "Load" a DMA map: since ahc_dmamem_alloc() returns contiguous coherent
 * memory, simply hand the callback a single segment covering the whole
 * allocation.  Always returns 0.
 */
int
ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
		void *cb_arg, int flags)
{
	/*
	 * Assume for now that this will only be used during
	 * initialization and not for per-transaction buffer mapping.
	 */
	bus_dma_segment_t stack_sg;

	stack_sg.ds_addr = map;
	stack_sg.ds_len = dmat->maxsize;
	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
	return (0);
}
/* No per-map state is kept on Linux; nothing to destroy. */
void
ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
}
/* No per-map state is kept on Linux; unloading is a no-op. */
int
ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Nothing to do */
	return (0);
}
/*
 * Handle the "global_tag_depth:<n>" option: apply the same tag depth
 * (clamped to 8 bits) to every target of every controller instance.
 */
static void
ahc_linux_setup_tag_info_global(char *p)
{
	int depth = simple_strtoul(p + 1, NULL, 0) & 0xff;
	int instance, targ;

	printk("Setting Global Tags= %d\n", depth);

	for (instance = 0; instance < ARRAY_SIZE(aic7xxx_tag_info); instance++)
		for (targ = 0; targ < AHC_NUM_TARGETS; targ++)
			aic7xxx_tag_info[instance].tag_commands[targ] = depth;
}
/*
 * Callback for the "tag_info" brace-option parser: record the requested
 * tag depth (clamped to 8 bits) for one controller/target pair, ignoring
 * out-of-range indices.
 */
static void
ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
{
	if (instance < 0 || instance >= ARRAY_SIZE(aic7xxx_tag_info))
		return;
	if (targ < 0 || targ >= AHC_NUM_TARGETS)
		return;

	aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
	if (bootverbose)
		printk("tag_info[%d:%d] = %d\n", instance, targ, value);
}
/*
 * Parse a brace-structured option argument such as
 * "tag_info:{{}.{.10}}" and invoke @callback for every numeric token,
 * passing the current (instance, target) position.
 *
 * '{' descends a level (instance, then target when depth > 1); '}'
 * ascends; '.'/',' advance to the next instance or target.  Parsing
 * stops at the end of the argument or on a malformed nesting.  Returns
 * a pointer just past the consumed portion of @opt_arg.
 */
static char *
ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
		       void (*callback)(u_long, int, int, int32_t),
		       u_long callback_arg)
{
	char	*tok_end;
	char	*tok_end2;
	int      i;
	int      instance;
	int	 targ;
	int	 done;
	char	 tok_list[] = {'.', ',', '{', '}', '\0'};

	/* All options use a ':' name/arg separator */
	if (*opt_arg != ':')
		return (opt_arg);
	opt_arg++;
	instance = -1;
	targ = -1;
	done = FALSE;
	/*
	 * Restore separator that may be in
	 * the middle of our option argument.
	 */
	tok_end = strchr(opt_arg, '\0');
	if (tok_end < end)
		*tok_end = ',';
	while (!done) {
		switch (*opt_arg) {
		case '{':
			if (instance == -1) {
				instance = 0;
			} else {
				if (depth > 1) {
					if (targ == -1)
						targ = 0;
				} else {
					printk("Malformed Option %s\n",
					       opt_name);
					done = TRUE;
				}
			}
			opt_arg++;
			break;
		case '}':
			if (targ != -1)
				targ = -1;
			else if (instance != -1)
				instance = -1;
			opt_arg++;
			break;
		case ',':
		case '.':
			if (instance == -1)
				done = TRUE;
			else if (targ >= 0)
				targ++;
			else if (instance >= 0)
				instance++;
			opt_arg++;
			break;
		case '\0':
			done = TRUE;
			break;
		default:
			/* Numeric token: find its end among the delimiters. */
			tok_end = end;
			for (i = 0; tok_list[i]; i++) {
				tok_end2 = strchr(opt_arg, tok_list[i]);
				if ((tok_end2) && (tok_end2 < tok_end))
					tok_end = tok_end2;
			}
			callback(callback_arg, instance, targ,
				 simple_strtol(opt_arg, NULL, 0));
			opt_arg = tok_end;
			break;
		}
	}
	return (opt_arg);
}
/*
* Handle Linux boot parameters. This routine allows for assigning a value
* to a parameter with a ':' between the parameter and the value.
* ie. aic7xxx=stpwlev:1,extended
*/
/*
 * Handle Linux boot parameters. This routine allows for assigning a value
 * to a parameter with a ':' between the parameter and the value.
 * ie. aic7xxx=stpwlev:1,extended
 *
 * Fix: options whose 'flag' pointer is NULL (e.g. "dv") previously fell
 * through to the generic *(options[i].flag) branches and dereferenced a
 * NULL pointer; such options are now skipped after their (possible)
 * special-case handling.
 */
static int
aic7xxx_setup(char *s)
{
	int	i, n;
	char   *p;
	char   *end;

	static const struct {
		const char *name;
		uint32_t *flag;		/* NULL: handled specially or ignored */
	} options[] = {
		{ "extended", &aic7xxx_extended },
		{ "no_reset", &aic7xxx_no_reset },
		{ "verbose", &aic7xxx_verbose },
		{ "allow_memio", &aic7xxx_allow_memio},
#ifdef AHC_DEBUG
		{ "debug", &ahc_debug },
#endif
		{ "periodic_otag", &aic7xxx_periodic_otag },
		{ "pci_parity", &aic7xxx_pci_parity },
		{ "seltime", &aic7xxx_seltime },
		{ "tag_info", NULL },
		{ "global_tag_depth", NULL },
		{ "dv", NULL }
	};

	end = strchr(s, '\0');

	/*
	 * XXX ia64 gcc isn't smart enough to know that ARRAY_SIZE
	 * will never be 0 in this case.
	 */
	n = 0;

	while ((p = strsep(&s, ",.")) != NULL) {
		if (*p == '\0')
			continue;
		for (i = 0; i < ARRAY_SIZE(options); i++) {
			n = strlen(options[i].name);
			if (strncmp(options[i].name, p, n) == 0)
				break;
		}
		if (i == ARRAY_SIZE(options))
			continue;

		if (strncmp(p, "global_tag_depth", n) == 0) {
			ahc_linux_setup_tag_info_global(p + n);
		} else if (strncmp(p, "tag_info", n) == 0) {
			s = ahc_parse_brace_option("tag_info", p + n, end,
			    2, ahc_linux_setup_tag_info, 0);
		} else if (options[i].flag == NULL) {
			/* Recognized but unused option (e.g. "dv"). */
			continue;
		} else if (p[n] == ':') {
			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
		} else if (strncmp(p, "verbose", n) == 0) {
			*(options[i].flag) = 1;
		} else {
			*(options[i].flag) ^= 0xFFFFFFFF;
		}
	}
	return 1;
}
__setup("aic7xxx=", aic7xxx_setup);
uint32_t aic7xxx_verbose;
/*
 * Allocate and register a Scsi_Host for @ahc, initialize the SCSI bus,
 * enable interrupts, and kick off device scanning.  Returns 0 on
 * success, ENOMEM (positive) if host allocation fails, or the negative
 * error from scsi_add_host().
 */
int
ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template)
{
	char	buf[80];
	struct	Scsi_Host *host;
	char	*new_name;
	u_long	s;
	int	retval;

	template->name = ahc->description;
	host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
	if (host == NULL)
		return (ENOMEM);

	*((struct ahc_softc **)host->hostdata) = ahc;
	ahc->platform_data->host = host;
	host->can_queue = AHC_MAX_QUEUE;
	host->cmd_per_lun = 2;
	/* XXX No way to communicate the ID for multiple channels */
	host->this_id = ahc->our_id;
	host->irq = ahc->platform_data->irq;
	host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
	host->max_lun = AHC_NUM_LUNS;
	host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0;
	host->sg_tablesize = AHC_NSEG;
	ahc_lock(ahc, &s);
	ahc_set_unit(ahc, ahc_linux_unit++);
	ahc_unlock(ahc, &s);
	/* Name the controller after its scsiN host number. */
	sprintf(buf, "scsi%d", host->host_no);
	new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
	if (new_name != NULL) {
		strcpy(new_name, buf);
		ahc_set_name(ahc, new_name);
	}
	host->unique_id = ahc->unit;
	ahc_linux_initialize_scsi_bus(ahc);
	ahc_intr_enable(ahc, TRUE);

	host->transportt = ahc_linux_transport_template;

	retval = scsi_add_host(host,
			(ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
	if (retval) {
		printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
		scsi_host_put(host);
		return retval;
	}

	scsi_scan_host(host);
	return 0;
}
/*
* Place the SCSI bus into a known state by either resetting it,
* or forcing transfer negotiations on the next command to any
* target.
*/
/*
 * Place the SCSI bus into a known state by either resetting it,
 * or forcing transfer negotiations on the next command to any
 * target.
 *
 * For channels that are NOT reset, targets i..numtarg-1 have their
 * negotiation state forced to renegotiate (AHC_NEG_ALWAYS).  When any
 * channel was reset, the SIM queue is frozen for AIC7XXX_RESET_DELAY ms
 * to let the bus settle.
 */
void
ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
{
	int i;
	int numtarg;
	unsigned long s;

	i = 0;
	numtarg = 0;

	ahc_lock(ahc, &s);

	if (aic7xxx_no_reset != 0)
		ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);

	if ((ahc->flags & AHC_RESET_BUS_A) != 0)
		ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
	else
		numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;

	if ((ahc->features & AHC_TWIN) != 0) {

		if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
			ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
		} else {
			/* Channel B targets occupy indices 8-15. */
			if (numtarg == 0)
				i = 8;
			numtarg += 8;
		}
	}

	/*
	 * Force negotiation to async for all targets that
	 * will not see an initial bus reset.
	 */
	for (; i < numtarg; i++) {
		struct ahc_devinfo devinfo;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		u_int our_id;
		u_int target_id;
		char channel;

		channel = 'A';
		our_id = ahc->our_id;
		target_id = i;
		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
			channel = 'B';
			our_id = ahc->our_id_b;
			target_id = i % 8;
		}
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
					    target_id, &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
				    CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
		ahc_update_neg_request(ahc, &devinfo, tstate,
				       tinfo, AHC_NEG_ALWAYS);
	}
	ahc_unlock(ahc, &s);
	/* Give the bus some time to recover */
	if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
		ahc_linux_freeze_simq(ahc);
		msleep(AIC7XXX_RESET_DELAY);
		ahc_linux_release_simq(ahc);
	}
}
/*
 * Allocate and initialize the Linux-specific platform data for @ahc.
 * Returns 0 on success or ENOMEM (positive, FreeBSD convention).
 *
 * Idiom: use kzalloc() instead of kmalloc()+memset() — linux/slab.h is
 * already included by this file.
 */
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	ahc->platform_data =
	    kzalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	ahc->platform_data->irq = AHC_LINUX_NOIRQ;
	ahc_lockinit(ahc);
	/* Selection timeout code lives in bits 4-5 of the SXFRCTL1 value. */
	ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
	ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
	if (aic7xxx_pci_parity == 0)
		ahc->flags |= AHC_DISABLE_PCI_PERR;

	return (0);
}
/*
 * Tear down the Linux-specific platform state: clear cached target
 * pointers, release the IRQ, unmap/release the register window, drop the
 * host reference, and free the platform data itself.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct scsi_target *starget;
	int i;

	if (ahc->platform_data != NULL) {
		/* destroy all of the device and target objects */
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			starget = ahc->platform_data->starget[i];
			if (starget != NULL) {
				ahc->platform_data->starget[i] = NULL;
			}
		}

		if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
			free_irq(ahc->platform_data->irq, ahc);
		if (ahc->tag == BUS_SPACE_PIO
		 && ahc->bsh.ioport != 0)
			release_region(ahc->bsh.ioport, 256);
		if (ahc->tag == BUS_SPACE_MEMIO
		 && ahc->bsh.maddr != NULL) {
			iounmap(ahc->bsh.maddr);
			release_mem_region(ahc->platform_data->mem_busaddr,
					   0x1000);
		}

		if (ahc->platform_data->host)
			scsi_host_put(ahc->platform_data->host);

		kfree(ahc->platform_data);
	}
}
/*
 * Requeue all commands outstanding to the device addressed by @scb
 * (CAM_REQUEUE_REQ) when its queue is frozen.
 */
void
ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				SCB_GET_CHANNEL(ahc, scb),
				SCB_GET_LUN(scb), SCB_LIST_NULL,
				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
}
/*
 * Switch a device between untagged, basic-tagged, and ordered-tagged
 * queueing and adjust its opening count accordingly.
 *
 * When the queueing mode changes while commands are active, the device
 * is frozen until it drains (AHC_DEV_FREEZE_TIL_EMPTY) so the change
 * takes effect cleanly.  Finally the SCSI midlayer's TCQ state is
 * updated to match.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
		      struct ahc_devinfo *devinfo, ahc_queue_alg alg)
{
	struct ahc_linux_device *dev;
	int was_queuing;
	int now_queuing;

	if (sdev == NULL)
		return;
	dev = scsi_transport_device_data(sdev);
	was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
	switch (alg) {
	default:
	case AHC_QUEUE_NONE:
		now_queuing = 0;
		break;
	case AHC_QUEUE_BASIC:
		now_queuing = AHC_DEV_Q_BASIC;
		break;
	case AHC_QUEUE_TAGGED:
		now_queuing = AHC_DEV_Q_TAGGED;
		break;
	}
	/* Changing mode with commands in flight: freeze until drained. */
	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
	 && (was_queuing != now_queuing)
	 && (dev->active != 0)) {
		dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen++;
	}

	dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
	if (now_queuing) {
		u_int usertags;

		usertags = ahc_linux_user_tagdepth(ahc, devinfo);
		if (!was_queuing) {
			/*
			 * Start out aggressively and allow our
			 * dynamic queue depth algorithm to take
			 * care of the rest.
			 */
			dev->maxtags = usertags;
			dev->openings = dev->maxtags - dev->active;
		}
		if (dev->maxtags == 0) {
			/*
			 * Queueing is disabled by the user.
			 */
			dev->openings = 1;
		} else if (alg == AHC_QUEUE_TAGGED) {
			dev->flags |= AHC_DEV_Q_TAGGED;
			if (aic7xxx_periodic_otag != 0)
				dev->flags |= AHC_DEV_PERIODIC_OTAG;
		} else
			dev->flags |= AHC_DEV_Q_BASIC;
	} else {
		/* We can only have one opening. */
		dev->maxtags = 0;
		dev->openings = 1 - dev->active;
	}
	/* Reflect the new mode into the midlayer's TCQ bookkeeping. */
	switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
	case AHC_DEV_Q_BASIC:
		scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
		scsi_activate_tcq(sdev, dev->openings + dev->active);
		break;
	case AHC_DEV_Q_TAGGED:
		scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
		scsi_activate_tcq(sdev, dev->openings + dev->active);
		break;
	default:
		/*
		 * We allow the OS to queue 2 untagged transactions to
		 * us at any time even though we can only execute them
		 * serially on the controller/device. This should
		 * remove some latency.
		 */
		scsi_deactivate_tcq(sdev, 2);
		break;
	}
}
/*
 * Platform hook for aborting queued SCBs; the Linux port keeps no
 * platform-side queues, so there is never anything to abort here.
 */
int
ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
			int lun, u_int tag, role_t role, uint32_t status)
{
	return 0;
}
/*
 * Return the user-configured tag depth for the device described by
 * @devinfo, clamped to AHC_MAX_QUEUE, or 0 when disconnection (and
 * therefore tagged queueing) is disabled for that target.  If more
 * controllers are installed than aic7xxx_tag_info has entries, warn once
 * and fall back to AHC_MAX_QUEUE.
 *
 * Fix: corrected the typo "aic7xxx_osm..c" in the warning message.
 */
static u_int
ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	static int warned_user;	/* ensure the warning is printed only once */
	u_int tags;

	tags = 0;
	if ((ahc->user_discenable & devinfo->target_mask) != 0) {
		if (ahc->unit >= ARRAY_SIZE(aic7xxx_tag_info)) {
			if (warned_user == 0) {
				printk(KERN_WARNING
"aic7xxx: WARNING: Insufficient tag_info instances\n"
"aic7xxx: for installed controllers. Using defaults\n"
"aic7xxx: Please update the aic7xxx_tag_info array in\n"
"aic7xxx: the aic7xxx_osm.c source file.\n");
				warned_user++;
			}
			tags = AHC_MAX_QUEUE;
		} else {
			adapter_tag_info_t *tag_info;

			tag_info = &aic7xxx_tag_info[ahc->unit];
			tags = tag_info->tag_commands[devinfo->target_offset];
			if (tags > AHC_MAX_QUEUE)
				tags = AHC_MAX_QUEUE;
		}
	}
	return (tags);
}
/*
* Determines the queue depth for a given device.
*/
static void
ahc_linux_device_queue_depth(struct scsi_device *sdev)
{
	struct	ahc_devinfo devinfo;
	u_int	tags;
	struct ahc_softc *ahc = *((struct ahc_softc **)sdev->host->hostdata);

	ahc_compile_devinfo(&devinfo,
			    sdev->sdev_target->channel == 0
			  ? ahc->our_id : ahc->our_id_b,
			    sdev->sdev_target->id, sdev->lun,
			    sdev->sdev_target->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);
	tags = ahc_linux_user_tagdepth(ahc, &devinfo);
	/* Tagged queueing requires both user opt-in and device support. */
	if (tags != 0 && sdev->tagged_supported != 0) {

		ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_TAGGED);
		ahc_send_async(ahc, devinfo.channel, devinfo.target,
			       devinfo.lun, AC_TRANSFER_NEG);
		ahc_print_devinfo(ahc, &devinfo);
		printk("Tagged Queuing enabled.  Depth %d\n", tags);
	} else {
		ahc_platform_set_tags(ahc, sdev, &devinfo, AHC_QUEUE_NONE);
		ahc_send_async(ahc, devinfo.channel, devinfo.target,
			       devinfo.lun, AC_TRANSFER_NEG);
	}
}
/*
 * Build a hardware SCB for @cmd and hand it to the sequencer.
 *
 * Returns 0 on success, or a SCSI_MLQUEUE_* busy code telling the
 * midlayer to retry later.  Callers provide the per-device state in
 * @dev; the controller lock is expected to be held (this routine
 * manipulates ahc->pending_scbs and the untagged queues without
 * additional locking).
 */
static int
ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
		      struct scsi_cmnd *cmd)
{
	struct scb *scb;
	struct hardware_scb *hscb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	uint16_t mask;
	struct scb_tailq *untagged_q = NULL;
	int nseg;

	/*
	 * Schedule us to run later.  The only reason we are not
	 * running is because the whole controller Q is frozen.
	 */
	if (ahc->platform_data->qfrozen != 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if (!blk_rq_tagged(cmd->request)
	    && (ahc->features & AHC_SCB_BTT) == 0) {
		int target_offset;

		/* Channel B targets occupy slots 8-15 of the queue array. */
		target_offset = cmd->device->id + cmd->device->channel * 8;
		untagged_q = &(ahc->untagged_queues[target_offset]);
		if (!TAILQ_EMPTY(untagged_q))
			/* if we're already executing an untagged command
			 * we're busy to another */
			return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Get an scb to use.  On failure the DMA mapping must be
	 * undone before reporting busy.
	 */
	scb = ahc_get_scb(ahc);
	if (!scb) {
		scsi_dma_unmap(cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	scb->io_ctx = cmd;
	scb->platform_data->dev = dev;
	hscb = scb->hscb;
	cmd->host_scribble = (char *)scb;

	/*
	 * Fill out basics of the HSCB: addressing, current transfer
	 * agreement and per-target option bits.
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahc, cmd);
	hscb->lun = cmd->device->lun;
	mask = SCB_GET_TARGET_MASK(ahc, scb);
	tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
				    SCB_GET_OUR_ID(scb),
				    SCB_GET_TARGET(ahc, scb), &tstate);
	hscb->scsirate = tinfo->scsirate;
	hscb->scsioffset = tinfo->curr.offset;
	if ((tstate->ultraenb & mask) != 0)
		hscb->control |= ULTRAENB;

	if ((ahc->user_discenable & mask) != 0)
		hscb->control |= DISCENB;

	if ((tstate->auto_negotiate & mask) != 0) {
		/* A (re)negotiation message must accompany this command. */
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
		int msg_bytes;
		uint8_t tag_msgs[2];

		msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
		if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
			hscb->control |= tag_msgs[0];
			if (tag_msgs[0] == MSG_ORDERED_TASK)
				dev->commands_since_idle_or_otag = 0;
		} else if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
				&& (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
			/*
			 * Periodically inject an ordered tag so a stream
			 * of simple tags cannot starve any command.
			 */
			hscb->control |= MSG_ORDERED_TASK;
			dev->commands_since_idle_or_otag = 0;
		} else {
			hscb->control |= MSG_SIMPLE_TASK;
		}
	}

	/* CDBs longer than 12 bytes live in the 32-byte overflow area. */
	hscb->cdb_len = cmd->cmd_len;
	if (hscb->cdb_len <= 12) {
		memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
	} else {
		memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	}

	scb->platform_data->xfer_len = 0;
	ahc_set_residual(scb, 0);
	ahc_set_sense_residual(scb, 0);
	scb->sg_count = 0;
	if (nseg > 0) {
		struct ahc_dma_seg *sg;
		struct scatterlist *cur_seg;
		int i;

		/* Copy the segments into the SG list. */
		sg = scb->sg_list;
		/*
		 * The sg_count may be larger than nseg if
		 * a transfer crosses a 32bit page.
		 */
		scsi_for_each_sg(cmd, cur_seg, nseg, i) {
			dma_addr_t addr;
			bus_size_t len;
			int consumed;

			addr = sg_dma_address(cur_seg);
			len = sg_dma_len(cur_seg);
			consumed = ahc_linux_map_seg(ahc, scb,
						     sg, addr, len);
			sg += consumed;
			scb->sg_count += consumed;
		}
		/* Mark the final element so the sequencer stops there. */
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/*
		 * Reset the sg list pointer.
		 */
		scb->hscb->sgptr =
			ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

		/*
		 * Copy the first SG into the "current"
		 * data pointer area.
		 */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase for this command. */
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
		scb->sg_count = 0;
	}

	/* Account for the command and make it visible to completion code. */
	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
	dev->openings--;
	dev->active++;
	dev->commands_issued++;
	if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
		dev->commands_since_idle_or_otag++;

	scb->flags |= SCB_ACTIVE;
	if (untagged_q) {
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
	}
	ahc_queue_scb(ahc, scb);
	return 0;
}
/*
 * SCSI controller interrupt handler.
 *
 * Runs the core interrupt routine under the controller lock and
 * reports to the kernel whether the interrupt was ours.
 */
irqreturn_t
ahc_linux_isr(int irq, void *dev_id)
{
	struct ahc_softc *ahc = dev_id;
	u_long flags;
	int handled;

	ahc_lock(ahc, &flags);
	handled = ahc_intr(ahc);
	ahc_unlock(ahc, &flags);

	return IRQ_RETVAL(handled);
}
/*
 * Platform hook to flush platform-deferred work before the core pauses
 * the chip.  The Linux OSM defers no work, so there is nothing to do.
 */
void
ahc_platform_flushwork(struct ahc_softc *ahc)
{
}
/*
 * Forward an asynchronous event from the core driver to the Linux
 * SCSI/SPI transport layers.
 *
 * @channel:  'A' or 'B', selecting the controller channel.
 * @target:   target id, or CAM_TARGET_WILDCARD for bus-wide events.
 * @lun:      logical unit, or CAM_LUN_WILDCARD.
 * @code:     which event occurred (transfer negotiation, BDR, bus reset).
 *
 * Unknown event codes are treated as a driver bug and panic.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel,
	       u_int target, u_int lun, ac_code code)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char buf[80];
		struct scsi_target *starget;
		struct ahc_linux_target *targ;
		struct info_str info;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		int target_offset;
		unsigned int target_ppr_options;

		BUG_ON(target == CAM_TARGET_WILDCARD);

		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahc_fetch_transinfo(ahc, channel,
					    channel == 'A' ? ahc->our_id
							   : ahc->our_id_b,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		target_offset = target;
		if (channel == 'B')
			target_offset += 8;
		starget = ahc->platform_data->starget[target_offset];
		if (starget == NULL)
			break;
		targ = scsi_transport_target_data(starget);

		/* Reconstruct the PPR options last pushed to the transport. */
		target_ppr_options =
			(spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
			+ (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
			+ (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0);

		if (tinfo->curr.period == spi_period(starget)
		    && tinfo->curr.width == spi_width(starget)
		    && tinfo->curr.offset == spi_offset(starget)
		    && tinfo->curr.ppr_options == target_ppr_options)
			if (bootverbose == 0)
				break;

		/* Publish the new agreement to the SPI transport class. */
		spi_period(starget) = tinfo->curr.period;
		spi_width(starget) = tinfo->curr.width;
		spi_offset(starget) = tinfo->curr.offset;
		spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
		spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
		spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
		spi_display_xfer_agreement(starget);
		break;
	}
	case AC_SENT_BDR:
	{
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahc->platform_data->host,
					 channel - 'A', target);
		break;
	}
	case AC_BUS_RESET:
		if (ahc->platform_data->host != NULL) {
			scsi_report_bus_reset(ahc->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahc_send_async: Unexpected async event");
	}
}
/*
 * Calls the higher level scsi done function and frees the scb.
 *
 * Completion path for every SCB: unlinks it from the pending and
 * untagged lists, updates per-device throttling accounting, maps the
 * hardware status into the command result, wakes any error-recovery
 * waiter, and finally releases the SCB and completes the command.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsi_cmnd *cmd;
	struct ahc_linux_device *dev;

	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		/* Only one untagged command may be outstanding per target. */
		BUG_ON(!TAILQ_EMPTY(untagged_q));
	} else if ((scb->flags & SCB_ACTIVE) == 0) {
		/*
		 * Transactions aborted from the untagged queue may
		 * not have been dispatched to the controller, so
		 * only check the SCB_ACTIVE flag for tagged transactions.
		 */
		printk("SCB %d done'd twice\n", scb->hscb->tag);
		ahc_dump_card_state(ahc);
		panic("Stopping for safety");
	}
	cmd = scb->io_ctx;
	dev = scb->platform_data->dev;
	dev->active--;
	dev->openings++;
	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
		cmd->result &= ~(CAM_DEV_QFRZN << 16);
		dev->qfrozen--;
	}
	ahc_linux_unmap_scb(ahc, scb);

	/*
	 * Guard against stale sense data.
	 * The Linux mid-layer assumes that sense
	 * was retrieved anytime the first byte of
	 * the sense buffer looks "sane".
	 */
	cmd->sense_buffer[0] = 0;
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		uint32_t amount_xferred;

		amount_xferred =
		    ahc_get_transfer_length(scb) - ahc_get_residual(scb);
		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MISC) != 0) {
				ahc_print_path(ahc, scb);
				printk("Set CAM_UNCOR_PARITY\n");
			}
#endif
			ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
#ifdef AHC_REPORT_UNDERFLOWS
			/*
			 * This code is disabled by default as some
			 * clients of the SCSI system do not properly
			 * initialize the underflow parameter.  This
			 * results in spurious termination of commands
			 * that complete as expected (e.g. underflow is
			 * allowed as command can return variable amounts
			 * of data).
			 */
		} else if (amount_xferred < scb->io_ctx->underflow) {
			u_int i;

			ahc_print_path(ahc, scb);
			printk("CDB:");
			for (i = 0; i < scb->io_ctx->cmd_len; i++)
				printk(" 0x%x", scb->io_ctx->cmnd[i]);
			printk("\n");
			ahc_print_path(ahc, scb);
			printk("Saw underflow (%ld of %ld bytes). "
			       "Treated as error\n",
				ahc_get_residual(scb),
				ahc_get_transfer_length(scb));
			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
#endif
		} else {
			ahc_set_transaction_status(scb, CAM_REQ_CMP);
		}
	} else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
		ahc_linux_handle_scsi_status(ahc, cmd->device, scb);
	}

	/* Count clean completions at minimum openings toward re-growing depth. */
	if (dev->openings == 1
	    && ahc_get_transaction_status(scb) == CAM_REQ_CMP
	    && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
		dev->tag_success_count++;
	/*
	 * Some devices deal with temporary internal resource
	 * shortages by returning queue full.  When the queue
	 * full occurs, we throttle back.  Slowly try to get
	 * back to our previous queue depth.
	 */
	if ((dev->openings + dev->active) < dev->maxtags
	    && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
		dev->tag_success_count = 0;
		dev->openings++;
	}

	if (dev->active == 0)
		dev->commands_since_idle_or_otag = 0;

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		printk("Recovery SCB completes\n");
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		    || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		/* Wake ahc_linux_queue_recovery_cmd() if it is waiting. */
		if (ahc->platform_data->eh_done)
			complete(ahc->platform_data->eh_done);
	}

	ahc_free_scb(ahc, scb);
	ahc_linux_queue_cmd_complete(ahc, cmd);
}
/*
 * Post-process a command that completed with a SCSI status error.
 *
 * Copies autosense data back into the midlayer's buffer for check
 * conditions, and implements the driver's own queue-full throttling:
 * openings are dropped to the number of commands actually outstanding
 * and the command is requeued, or — if nothing is outstanding — the
 * device is treated as if it returned BUSY with a single opening.
 */
static void
ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
			     struct scsi_device *sdev, struct scb *scb)
{
	struct ahc_devinfo devinfo;
	struct ahc_linux_device *dev = scsi_transport_device_data(sdev);

	ahc_compile_devinfo(&devinfo,
			    ahc->our_id,
			    sdev->sdev_target->id, sdev->lun,
			    sdev->sdev_target->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);

	/*
	 * We don't currently trust the mid-layer to
	 * properly deal with queue full or busy.  So,
	 * when one occurs, we tell the mid-layer to
	 * unconditionally requeue the command to us
	 * so that we can retry it ourselves.  We also
	 * implement our own throttling mechanism so
	 * we don't clobber the device with too many
	 * commands.
	 */
	switch (ahc_get_scsi_status(scb)) {
	default:
		break;
	case SCSI_STATUS_CHECK_COND:
	case SCSI_STATUS_CMD_TERMINATED:
	{
		struct scsi_cmnd *cmd;

		/*
		 * Copy sense information to the OS's cmd
		 * structure if it is available.
		 */
		cmd = scb->io_ctx;
		if (scb->flags & SCB_SENSE) {
			u_int sense_size;

			/* Never copy more than the midlayer buffer holds. */
			sense_size = min(sizeof(struct scsi_sense_data)
					 - ahc_get_sense_residual(scb),
					 (u_long)SCSI_SENSE_BUFFERSIZE);
			memcpy(cmd->sense_buffer,
			       ahc_get_sense_buf(ahc, scb), sense_size);
			if (sense_size < SCSI_SENSE_BUFFERSIZE)
				memset(&cmd->sense_buffer[sense_size], 0,
				       SCSI_SENSE_BUFFERSIZE - sense_size);
			cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHC_DEBUG
			if (ahc_debug & AHC_SHOW_SENSE) {
				int i;

				printk("Copied %d bytes of sense data:",
				       sense_size);
				for (i = 0; i < sense_size; i++) {
					if ((i & 0xF) == 0)
						printk("\n");
					printk("0x%x ", cmd->sense_buffer[i]);
				}
				printk("\n");
			}
#endif
		}
		break;
	}
	case SCSI_STATUS_QUEUE_FULL:
	{
		/*
		 * By the time the core driver has returned this
		 * command, all other commands that were queued
		 * to us but not the device have been returned.
		 * This ensures that dev->active is equal to
		 * the number of commands actually queued to
		 * the device.
		 */
		dev->tag_success_count = 0;
		if (dev->active != 0) {
			/*
			 * Drop our opening count to the number
			 * of commands currently outstanding.
			 */
			dev->openings = 0;
			/*
			ahc_print_path(ahc, scb);
			printk("Dropping tag count to %d\n", dev->active);
			*/
			if (dev->active == dev->tags_on_last_queuefull) {
				dev->last_queuefull_same_count++;
				/*
				 * If we repeatedly see a queue full
				 * at the same queue depth, this
				 * device has a fixed number of tag
				 * slots.  Lock in this tag depth
				 * so we stop seeing queue fulls from
				 * this device.
				 */
				if (dev->last_queuefull_same_count
				    == AHC_LOCK_TAGS_COUNT) {
					dev->maxtags = dev->active;
					ahc_print_path(ahc, scb);
					printk("Locking max tag count at %d\n",
					       dev->active);
				}
			} else {
				dev->tags_on_last_queuefull = dev->active;
				dev->last_queuefull_same_count = 0;
			}
			ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahc_set_scsi_status(scb, SCSI_STATUS_OK);
			ahc_platform_set_tags(ahc, sdev, &devinfo,
					      (dev->flags & AHC_DEV_Q_BASIC)
					      ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
			break;
		}
		/*
		 * Drop down to a single opening, and treat this
		 * as if the target returned BUSY SCSI status.
		 */
		dev->openings = 1;
		ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
		ahc_platform_set_tags(ahc, sdev, &devinfo,
				      (dev->flags & AHC_DEV_Q_BASIC)
				      ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
		break;
	}
	}
}
/*
 * Translate the CAM transaction status stored in @cmd into the Linux
 * DID_* host-byte code and complete the command via scsi_done.
 *
 * The CAM code is kept until this final step so that earlier layers
 * retain the full error information when making state decisions.
 */
static void
ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd)
{
	u_int new_status;

	switch (ahc_cmd_get_transaction_status(cmd)) {
	case CAM_REQ_INPROG:
	case CAM_REQ_CMP:
	case CAM_SCSI_STATUS_ERROR:
		/* Completed (possibly with SCSI status); host side is fine. */
		new_status = DID_OK;
		break;
	case CAM_REQ_ABORTED:
		new_status = DID_ABORT;
		break;
	case CAM_BUSY:
		new_status = DID_BUS_BUSY;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
		new_status = DID_BAD_TARGET;
		break;
	case CAM_SEL_TIMEOUT:
		new_status = DID_NO_CONNECT;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		new_status = DID_RESET;
		break;
	case CAM_UNCOR_PARITY:
		new_status = DID_PARITY;
		break;
	case CAM_CMD_TIMEOUT:
		new_status = DID_TIME_OUT;
		break;
	case CAM_UA_ABORT:
	case CAM_REQ_CMP_ERR:
	case CAM_AUTOSENSE_FAIL:
	case CAM_NO_HBA:
	case CAM_DATA_RUN_ERR:
	case CAM_UNEXP_BUSFREE:
	case CAM_SEQUENCE_FAIL:
	case CAM_CCB_LEN_ERR:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_REQ_TOO_BIG:
		/* Miscellaneous unrecoverable failures. */
		new_status = DID_ERROR;
		break;
	case CAM_REQUEUE_REQ:
		new_status = DID_REQUEUE;
		break;
	default:
		/* We should never get here */
		new_status = DID_ERROR;
		break;
	}

	ahc_cmd_set_transaction_status(cmd, new_status);
	cmd->scsi_done(cmd);
}
/*
 * Freeze the controller's request queue.
 *
 * Increments the freeze count under the controller lock; on the first
 * freeze, blocks midlayer requests and requeues all in-flight
 * platform SCBs so nothing new reaches the hardware.
 */
static void
ahc_linux_freeze_simq(struct ahc_softc *ahc)
{
	unsigned long flags;

	ahc_lock(ahc, &flags);
	if (++ahc->platform_data->qfrozen == 1) {
		scsi_block_requests(ahc->platform_data->host);

		/* XXX What about Twin channels? */
		ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
					CAM_LUN_WILDCARD, SCB_LIST_NULL,
					ROLE_INITIATOR, CAM_REQUEUE_REQ);
	}
	ahc_unlock(ahc, &flags);
}
/*
 * Undo one level of queue freeze; when the count reaches zero, let the
 * midlayer submit requests again.  The unblock happens outside the
 * controller lock because scsi_unblock_requests() may recurse into
 * our queuecommand path.
 */
static void
ahc_linux_release_simq(struct ahc_softc *ahc)
{
	u_long flags;
	int do_unblock = 0;

	ahc_lock(ahc, &flags);
	if (ahc->platform_data->qfrozen > 0)
		ahc->platform_data->qfrozen--;
	if (ahc->platform_data->qfrozen == 0)
		do_unblock = 1;
	ahc_unlock(ahc, &flags);

	/*
	 * There is still a race here.  The mid-layer
	 * should keep its own freeze count and use
	 * a bottom half handler to run the queues
	 * so we can unblock with our own lock held.
	 */
	if (do_unblock)
		scsi_unblock_requests(ahc->platform_data->host);
}
/*
 * Common implementation of the abort and device-reset error-recovery
 * handlers.
 *
 * @cmd:  the midlayer command to recover.
 * @flag: SCB_ABORT to abort just this command, or SCB_DEVICE_RESET to
 *        send a bus device reset to its target.
 *
 * Returns SUCCESS or FAILED per the SCSI EH contract.  The routine
 * locates the SCB for @cmd (or any SCB to the same target for a BDR),
 * then either asserts ATN if the device is active on the bus or
 * requeues the SCB with MK_MESSAGE set so the message is delivered at
 * the next (re)selection.  If a message was queued, it sleeps up to
 * five seconds for ahc_done() to signal eh_done.
 */
static int
ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
{
	struct ahc_softc *ahc;
	struct ahc_linux_device *dev;
	struct scb *pending_scb;
	u_int saved_scbptr;
	u_int active_scb_index;
	u_int last_phase;
	u_int saved_scsiid;
	u_int cdb_byte;
	int retval;
	int was_paused;
	int paused;
	int wait;
	int disconnected;
	unsigned long flags;

	pending_scb = NULL;
	paused = FALSE;
	wait = FALSE;
	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;

	scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n",
	       flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");

	printk("CDB:");
	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
		printk(" 0x%x", cmd->cmnd[cdb_byte]);
	printk("\n");

	ahc_lock(ahc, &flags);

	/*
	 * First determine if we currently own this command.
	 * Start by searching the device queue.  If not found
	 * there, check the pending_scb list.  If not found
	 * at all, and the system wanted us to just abort the
	 * command, return success.
	 */
	dev = scsi_transport_device_data(cmd->device);

	if (dev == NULL) {
		/*
		 * No target device for this command exists,
		 * so we must not still own the command.
		 */
		printk("%s:%d:%d:%d: Is not an active device\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = SUCCESS;
		goto no_cmd;
	}

	if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
	    && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
					  cmd->device->channel + 'A',
					  cmd->device->lun,
					  CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
		printk("%s:%d:%d:%d: Command found on untagged queue\n",
		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
		       cmd->device->lun);
		retval = SUCCESS;
		goto done;
	}

	/*
	 * See if we can find a matching cmd in the pending list.
	 */
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		if (pending_scb->io_ctx == cmd)
			break;
	}

	if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
		/* Any SCB for this device will do for a target reset */
		LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
			if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
					  scmd_channel(cmd) + 'A',
					  CAM_LUN_WILDCARD,
					  SCB_LIST_NULL, ROLE_INITIATOR))
				break;
		}
	}

	if (pending_scb == NULL) {
		scmd_printk(KERN_INFO, cmd, "Command not found\n");
		goto no_cmd;
	}

	if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
		/*
		 * We can't queue two recovery actions using the same SCB
		 */
		retval = FAILED;
		goto done;
	}

	/*
	 * Ensure that the card doesn't do anything
	 * behind our back and that we didn't "just" miss
	 * an interrupt that would affect this cmd.
	 */
	was_paused = ahc_is_paused(ahc);
	ahc_pause_and_flushwork(ahc);
	paused = TRUE;

	if ((pending_scb->flags & SCB_ACTIVE) == 0) {
		scmd_printk(KERN_INFO, cmd, "Command already completed\n");
		goto no_cmd;
	}

	printk("%s: At time of recovery, card was %spaused\n",
	       ahc_name(ahc), was_paused ? "" : "not ");
	ahc_dump_card_state(ahc);

	disconnected = TRUE;
	if (flag == SCB_ABORT) {
		/* The qinfifo holds commands not yet started; pulling the
		 * SCB from there completes the abort without touching the bus. */
		if (ahc_search_qinfifo(ahc, cmd->device->id,
				       cmd->device->channel + 'A',
				       cmd->device->lun,
				       pending_scb->hscb->tag,
				       ROLE_INITIATOR, CAM_REQ_ABORTED,
				       SEARCH_COMPLETE) > 0) {
			printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
			       ahc_name(ahc), cmd->device->channel,
			       cmd->device->id, cmd->device->lun);
			retval = SUCCESS;
			goto done;
		}
	} else if (ahc_search_qinfifo(ahc, cmd->device->id,
				      cmd->device->channel + 'A',
				      cmd->device->lun, pending_scb->hscb->tag,
				      ROLE_INITIATOR, /*status*/0,
				      SEARCH_COUNT) > 0) {
		disconnected = FALSE;
	}

	if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
		struct scb *bus_scb;

		bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
		if (bus_scb == pending_scb)
			disconnected = FALSE;
		else if (flag != SCB_ABORT
			 && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
			 && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
			disconnected = FALSE;
	}

	/*
	 * At this point, pending_scb is the scb associated with the
	 * passed in command.  That command is currently active on the
	 * bus, is in the disconnected state, or we're hoping to find
	 * a command for the same target active on the bus to abuse to
	 * send a BDR.  Queue the appropriate message based on which of
	 * these states we are in.
	 */
	last_phase = ahc_inb(ahc, LASTPHASE);
	saved_scbptr = ahc_inb(ahc, SCBPTR);
	active_scb_index = ahc_inb(ahc, SCB_TAG);
	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	if (last_phase != P_BUSFREE
	    && (pending_scb->hscb->tag == active_scb_index
		|| (flag == SCB_DEVICE_RESET
		    && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) {

		/*
		 * We're active on the bus, so assert ATN
		 * and hope that the target responds.
		 */
		pending_scb = ahc_lookup_scb(ahc, active_scb_index);
		pending_scb->flags |= SCB_RECOVERY_SCB|flag;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
		scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
		wait = TRUE;
	} else if (disconnected) {

		/*
		 * Actually re-queue this SCB in an attempt
		 * to select the device before it reconnects.
		 * In either case (selection or reselection),
		 * we will now issue the appropriate message
		 * to the timed-out device.
		 *
		 * Set the MK_MESSAGE control bit indicating
		 * that we desire to send a message.  We
		 * also set the disconnected flag since
		 * in the paging case there is no guarantee
		 * that our SCB control byte matches the
		 * version on the card.  We don't want the
		 * sequencer to abort the command thinking
		 * an unsolicited reselection occurred.
		 */
		pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
		pending_scb->flags |= SCB_RECOVERY_SCB|flag;

		/*
		 * Remove any cached copy of this SCB in the
		 * disconnected list in preparation for the
		 * queuing of our abort SCB.  We use the
		 * same element in the SCB, SCB_NEXT, for
		 * both the qinfifo and the disconnected list.
		 */
		ahc_search_disc_list(ahc, cmd->device->id,
				     cmd->device->channel + 'A',
				     cmd->device->lun, pending_scb->hscb->tag,
				     /*stop_on_first*/TRUE,
				     /*remove*/TRUE,
				     /*save_state*/FALSE);

		/*
		 * In the non-paging case, the sequencer will
		 * never re-reference the in-core SCB.
		 * To make sure we are notified during
		 * reselection, set the MK_MESSAGE flag in
		 * the card's copy of the SCB.
		 */
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag);
			ahc_outb(ahc, SCB_CONTROL,
				 ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE);
		}

		/*
		 * Clear out any entries in the QINFIFO first
		 * so we are the next SCB for this target
		 * to run.
		 */
		ahc_search_qinfifo(ahc, cmd->device->id,
				   cmd->device->channel + 'A',
				   cmd->device->lun, SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
		ahc_qinfifo_requeue_tail(ahc, pending_scb);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
		ahc_print_path(ahc, pending_scb);
		printk("Device is disconnected, re-queuing SCB\n");
		wait = TRUE;
	} else {
		scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
		retval = FAILED;
		goto done;
	}

no_cmd:
	/*
	 * Our assumption is that if we don't have the command, no
	 * recovery action was required, so we return success.  Again,
	 * the semantics of the mid-layer recovery engine are not
	 * well defined, so this may change in time.
	 */
	retval = SUCCESS;
done:
	if (paused)
		ahc_unpause(ahc);
	if (wait) {
		DECLARE_COMPLETION_ONSTACK(done);

		ahc->platform_data->eh_done = &done;
		ahc_unlock(ahc, &flags);

		printk("Recovery code sleeping\n");
		if (!wait_for_completion_timeout(&done, 5 * HZ)) {
			ahc_lock(ahc, &flags);
			ahc->platform_data->eh_done = NULL;
			ahc_unlock(ahc, &flags);

			printk("Timer Expired\n");
			retval = FAILED;
		}
		printk("Recovery code awake\n");
	} else
		ahc_unlock(ahc, &flags);
	return (retval);
}
/*
 * Platform hook invoked from ahc_dump_card_state().  The Linux OSM
 * keeps no additional per-platform state worth printing, so this is
 * intentionally empty.
 */
void
ahc_platform_dump_card_state(struct ahc_softc *ahc)
{
}
/*
 * SPI transport "width" attribute setter: record the requested bus
 * width (0 = narrow, 1 = wide) as the negotiation goal for @starget.
 */
static void ahc_linux_set_width(struct scsi_target *starget, int width)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
	unsigned long lock_flags;
	struct ahc_devinfo devinfo;

	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);

	ahc_lock(ahc, &lock_flags);
	ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
	ahc_unlock(ahc, &lock_flags);
}
/*
 * SPI transport "period" attribute setter: set the goal sync period
 * for @starget (period in 4ns units per the SPI transport convention).
 *
 * Clamps to the controller minimum of 9 (12.5ns), which additionally
 * requires DT and wide transfers; strips non-QAS PPR options when the
 * target is narrow, then looks up the matching syncrate and commits
 * it as the negotiation goal under the controller lock.
 */
static void ahc_linux_set_period(struct scsi_target *starget, int period)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo
		= ahc_fetch_transinfo(ahc,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahc_devinfo devinfo;
	unsigned int ppr_options = tinfo->goal.ppr_options;
	unsigned long flags;
	unsigned long offset = tinfo->goal.offset;
	const struct ahc_syncrate *syncrate;

	/* A zero goal offset would mean async; use the maximum instead. */
	if (offset == 0)
		offset = MAX_OFFSET;

	if (period < 9)
		period = 9;	/* 12.5ns is our minimum */
	if (period == 9) {
		if (spi_max_width(starget))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			/* need wide for DT and need DT for 12.5 ns */
			period = 10;
	}

	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);

	/* all PPR requests apart from QAS require wide transfers */
	if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
		if (spi_width(starget) == 0)
			ppr_options &= MSG_EXT_PPR_QAS_REQ;
	}

	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
	ahc_lock(ahc, &flags);
	ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
			 ppr_options, AHC_TRANS_GOAL, FALSE);
	ahc_unlock(ahc, &flags);
}
/*
 * SPI transport "offset" attribute setter: set the goal sync offset
 * for @starget.  An offset of 0 requests asynchronous transfers
 * (syncrate stays NULL, period 0).
 *
 * For a nonzero offset, the goal period and PPR options must be
 * loaded *before* calling ahc_find_syncrate(); the previous ordering
 * looked up a syncrate for period 0 (which yields the async/NULL
 * rate and clears DT) and only then overwrote period/ppr_options
 * with the goal values, pairing a stale NULL syncrate with the goal
 * settings and causing ahc_set_syncrate() to force async operation.
 * This matches the ordering used by the aic79xx sibling driver.
 */
static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo
		= ahc_fetch_transinfo(ahc,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahc_devinfo devinfo;
	unsigned int ppr_options = 0;
	unsigned int period = 0;
	unsigned long flags;
	const struct ahc_syncrate *syncrate = NULL;

	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	if (offset != 0) {
		period = tinfo->goal.period;
		ppr_options = tinfo->goal.ppr_options;
		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
					     AHC_SYNCRATE_DT);
	}
	ahc_lock(ahc, &flags);
	ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
			 ppr_options, AHC_TRANS_GOAL, FALSE);
	ahc_unlock(ahc, &flags);
}
/*
 * SPI transport "DT" attribute setter: enable or disable Double
 * Transition clocking as a negotiation goal for @starget.
 *
 * Enabling DT requires wide transfers, so the width goal is raised
 * when needed; disabling DT with a 12.5ns period forces the period
 * back to the slowest-DT-only boundary (>= 25ns).
 */
static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo
		= ahc_fetch_transinfo(ahc,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahc_devinfo devinfo;
	unsigned int ppr_options = tinfo->goal.ppr_options
		& ~MSG_EXT_PPR_DT_REQ;
	unsigned int period = tinfo->goal.period;
	unsigned int width = tinfo->goal.width;
	unsigned long flags;
	const struct ahc_syncrate *syncrate;

	if (dt && spi_max_width(starget)) {
		ppr_options |= MSG_EXT_PPR_DT_REQ;
		if (!width)
			/* DT transfers require a wide bus. */
			ahc_linux_set_width(starget, 1);
	} else if (period == 9)
		period = 10;	/* if resetting DT, period must be >= 25ns */

	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
	ahc_lock(ahc, &flags);
	ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
			 ppr_options, AHC_TRANS_GOAL, FALSE);
	ahc_unlock(ahc, &flags);
}
#if 0
/* FIXME: This code claims to support IU and QAS.  However, the actual
 * sequencer code and aic7xxx_core have no support for these parameters and
 * will get into a bad state if they're negotiated.  Do not enable this
 * unless you know what you're doing */
/*
 * SPI transport "QAS" attribute setter (compiled out — see FIXME
 * above): would toggle the Quick Arbitration and Selection PPR goal
 * bit for @starget and re-resolve the syncrate.
 */
static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo
		= ahc_fetch_transinfo(ahc,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahc_devinfo devinfo;
	unsigned int ppr_options = tinfo->goal.ppr_options
		& ~MSG_EXT_PPR_QAS_REQ;
	unsigned int period = tinfo->goal.period;
	unsigned long flags;
	struct ahc_syncrate *syncrate;

	if (qas)
		ppr_options |= MSG_EXT_PPR_QAS_REQ;

	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
	ahc_lock(ahc, &flags);
	ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
			 ppr_options, AHC_TRANS_GOAL, FALSE);
	ahc_unlock(ahc, &flags);
}
/*
 * SPI transport "IU" attribute setter (compiled out — see FIXME at
 * the top of this #if 0 region): would toggle the Information Units
 * PPR goal bit for @starget and re-resolve the syncrate.
 */
static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo
		= ahc_fetch_transinfo(ahc,
				      starget->channel + 'A',
				      shost->this_id, starget->id, &tstate);
	struct ahc_devinfo devinfo;
	unsigned int ppr_options = tinfo->goal.ppr_options
		& ~MSG_EXT_PPR_IU_REQ;
	unsigned int period = tinfo->goal.period;
	unsigned long flags;
	struct ahc_syncrate *syncrate;

	if (iu)
		ppr_options |= MSG_EXT_PPR_IU_REQ;

	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
			    starget->channel + 'A', ROLE_INITIATOR);
	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
	ahc_lock(ahc, &flags);
	ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
			 ppr_options, AHC_TRANS_GOAL, FALSE);
	ahc_unlock(ahc, &flags);
}
#endif
/*
 * Report the bus signalling mode (SE/LVD/HVD) to the SPI transport.
 *
 * Ultra2 chips expose the mode in SBLKCTL, which is sampled with the
 * sequencer paused; older chips are classified purely from their
 * feature flags since they may lack the register entirely.
 */
static void ahc_linux_get_signalling(struct Scsi_Host *shost)
{
	struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
	unsigned long flags;
	u8 sblkctl;

	if ((ahc->features & AHC_ULTRA2) == 0) {
		/* non-LVD chipset, may not have SBLKCTL reg */
		if (ahc->features & AHC_HVD)
			spi_signalling(shost) = SPI_SIGNAL_HVD;
		else
			spi_signalling(shost) = SPI_SIGNAL_SE;
		return;
	}

	ahc_lock(ahc, &flags);
	ahc_pause(ahc);
	sblkctl = ahc_inb(ahc, SBLKCTL);
	ahc_unpause(ahc);
	ahc_unlock(ahc, &flags);

	if (sblkctl & ENAB40)
		spi_signalling(shost) = SPI_SIGNAL_LVD;
	else if (sblkctl & ENAB20)
		spi_signalling(shost) = SPI_SIGNAL_SE;
	else
		spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
}
/*
 * Hooks registered with the SPI transport class.  Only parameters the
 * core sequencer actually supports are wired up; the IU and QAS
 * setters are compiled out above (see the FIXME in that #if 0 block).
 */
static struct spi_function_template ahc_linux_transport_functions = {
	.set_offset	= ahc_linux_set_offset,
	.show_offset	= 1,
	.set_period	= ahc_linux_set_period,
	.show_period	= 1,
	.set_width	= ahc_linux_set_width,
	.show_width	= 1,
	.set_dt		= ahc_linux_set_dt,
	.show_dt	= 1,
#if 0
	.set_iu		= ahc_linux_set_iu,
	.show_iu	= 1,
	.set_qas	= ahc_linux_set_qas,
	.show_qas	= 1,
#endif
	.get_signalling	= ahc_linux_get_signalling,
};
/*
 * Module entry point: parse module parameters, register with the SPI
 * transport class, reserve per-device private data, and kick off the
 * PCI and EISA bus probes.
 */
static int __init
ahc_linux_init(void)
{
	/*
	 * If we've been passed any parameters, process them now.
	 */
	if (aic7xxx)
		aic7xxx_setup(aic7xxx);

	ahc_linux_transport_template =
		spi_attach_transport(&ahc_linux_transport_functions);
	if (!ahc_linux_transport_template)
		return -ENODEV;

	scsi_transport_reserve_device(ahc_linux_transport_template,
				      sizeof(struct ahc_linux_device));

	/*
	 * NOTE(review): the return values of the bus probes are ignored,
	 * so the module loads even if no controller registers — confirm
	 * this is intentional.
	 */
	ahc_linux_pci_init();
	ahc_linux_eisa_init();
	return 0;
}
/*
 * Module exit: unregister the PCI and EISA front ends, then release the
 * SPI transport template attached in ahc_linux_init().
 */
static void
ahc_linux_exit(void)
{
ahc_linux_pci_exit();
ahc_linux_eisa_exit();
spi_release_transport(ahc_linux_transport_template);
}
module_init(ahc_linux_init);
module_exit(ahc_linux_exit);
| gpl-2.0 |
lim417dev/android_kernel_nubia_nx505j | sound/usb/usx2y/usbusx2y.c | 7182 | 13552 | /*
* usbusy2y.c - ALSA USB US-428 Driver
*
2005-04-14 Karsten Wiese
Version 0.8.7.2:
Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom.
Tested ok with kernel 2.6.12-rc2.
2004-12-14 Karsten Wiese
Version 0.8.7.1:
snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open.
2004-12-02 Karsten Wiese
Version 0.8.7:
Use macro usb_maxpacket() for portability.
2004-10-26 Karsten Wiese
Version 0.8.6:
wake_up() process waiting in usX2Y_urbs_start() on error.
2004-10-21 Karsten Wiese
Version 0.8.5:
nrpacks is runtime or compiletime configurable now with tested values from 1 to 4.
2004-10-03 Karsten Wiese
Version 0.8.2:
Avoid any possible racing while in prepare callback.
2004-09-30 Karsten Wiese
Version 0.8.0:
Simplified things and made ohci work again.
2004-09-20 Karsten Wiese
Version 0.7.3:
Use usb_kill_urb() instead of deprecated (kernel 2.6.9) usb_unlink_urb().
2004-07-13 Karsten Wiese
Version 0.7.1:
Don't sleep in START/STOP callbacks anymore.
us428 channels C/D not handled just for this version, sorry.
2004-06-21 Karsten Wiese
Version 0.6.4:
Temporarely suspend midi input
to sanely call usb_set_interface() when setting format.
2004-06-12 Karsten Wiese
Version 0.6.3:
Made it thus the following rule is enforced:
"All pcm substreams of one usX2Y have to operate at the same rate & format."
2004-04-06 Karsten Wiese
Version 0.6.0:
Runs on 2.6.5 kernel without any "--with-debug=" things.
us224 reported running.
2004-01-14 Karsten Wiese
Version 0.5.1:
Runs with 2.6.1 kernel.
2003-12-30 Karsten Wiese
Version 0.4.1:
Fix 24Bit 4Channel capturing for the us428.
2003-11-27 Karsten Wiese, Martin Langer
Version 0.4:
us122 support.
us224 could be tested by uncommenting the sections containing USB_ID_US224
2003-11-03 Karsten Wiese
Version 0.3:
24Bit support.
"arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE|aplay -D hw:1 -c 2 -r 48000 -M -f S24_3LE" works.
2003-08-22 Karsten Wiese
Version 0.0.8:
Removed EZUSB Firmware. First Stage Firmwaredownload is now done by tascam-firmware downloader.
See:
http://usb-midi-fw.sourceforge.net/tascam-firmware.tar.gz
2003-06-18 Karsten Wiese
Version 0.0.5:
changed to compile with kernel 2.4.21 and alsa 0.9.4
2002-10-16 Karsten Wiese
Version 0.0.4:
compiles again with alsa-current.
USB_ISO_ASAP not used anymore (most of the time), instead
urb->start_frame is calculated here now, some calls inside usb-driver don't need to happen anymore.
To get the best out of this:
Disable APM-support in the kernel as APM-BIOS calls (once each second) hard disable interrupt for many precious milliseconds.
This helped me much on my slowish PII 400 & PIII 500.
ACPI yet untested but might cause the same bad behaviour.
Use a kernel with lowlatency and preemptiv patches applied.
To autoload snd-usb-midi append a line
post-install snd-usb-us428 modprobe snd-usb-midi
to /etc/modules.conf.
known problems:
sliders, knobs, lights not yet handled except MASTER Volume slider.
"pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does.
KDE3: "Enable full duplex operation" deadlocks.
2002-08-31 Karsten Wiese
Version 0.0.3: audio also simplex;
simplifying: iso urbs only 1 packet, melted structs.
ASYNC_UNLINK not used anymore: no more crashes so far.....
for alsa 0.9 rc3.
2002-08-09 Karsten Wiese
Version 0.0.2: midi works with snd-usb-midi, audio (only fullduplex now) with i.e. bristol.
The firmware has been sniffed from win2k us-428 driver 3.09.
* Copyright (c) 2002 - 2004 Karsten Wiese
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>
#include "usx2y.h"
#include "usbusx2y.h"
#include "usX2Yhwdep.h"
MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>");
MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604), "NAME_ALLCAPS"(0x8001)(0x8005)(0x8007) }}");
/* Standard ALSA card module parameters (index/id/enable per slot). */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS".");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS".");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS".");
/* Tracks which card slots are occupied; cleared in card private_free. */
static int snd_usX2Y_card_used[SNDRV_CARDS];
static void usX2Y_usb_disconnect(struct usb_device* usb_device, void* ptr);
static void snd_usX2Y_card_private_free(struct snd_card *card);
/*
* pipe 4 is used for switching the lamps, setting samplerate, volumes ....
*/
/*
 * Completion handler for the asynchronous bulk OUT urbs on pipe 4
 * (lamps, samplerate, volumes).  Nothing to do on success; under
 * CONFIG_SND_DEBUG a failed urb's index and status are logged.
 */
static void i_usX2Y_Out04Int(struct urb *urb)
{
#ifdef CONFIG_SND_DEBUG
if (urb->status) {
int i;
struct usX2Ydev *usX2Y = urb->context;
/* Locate which AS04 urb this is, purely for the diagnostic below. */
for (i = 0; i < 10 && usX2Y->AS04.urb[i] != urb; i++);
snd_printdd("i_usX2Y_Out04Int() urb %i status=%i\n", i, urb->status);
}
#endif
}
/*
 * Completion handler for the interrupt IN urb on pipe 4.
 *
 * Each 21-byte report carries controller state (knobs/faders).  Changed
 * bytes are snapshotted into the shared-memory ring for userspace, then
 * any pending pipe-4 OUT work is (re)submitted, and finally this urb is
 * resubmitted so status polling continues.  Runs in interrupt context,
 * hence GFP_ATOMIC throughout.
 */
static void i_usX2Y_In04Int(struct urb *urb)
{
int err = 0;
struct usX2Ydev *usX2Y = urb->context;
struct us428ctls_sharedmem *us428ctls = usX2Y->us428ctls_sharedmem;
usX2Y->In04IntCalls++;
if (urb->status) {
snd_printdd("Interrupt Pipe 4 came back with status=%i\n", urb->status);
/* Do not resubmit on error; polling stops here. */
return;
}
// printk("%i:0x%02X ", 8, (int)((unsigned char*)usX2Y->In04Buf)[8]); Master volume shows 0 here if fader is at max during boot ?!?
if (us428ctls) {
/* diff = index of first changed byte, or -1 if nothing changed. */
int diff = -1;
if (-2 == us428ctls->CtlSnapShotLast) {
/* First report after init: treat the whole buffer as changed. */
diff = 0;
memcpy(usX2Y->In04Last, usX2Y->In04Buf, sizeof(usX2Y->In04Last));
us428ctls->CtlSnapShotLast = -1;
} else {
int i;
for (i = 0; i < 21; i++) {
if (usX2Y->In04Last[i] != ((char*)usX2Y->In04Buf)[i]) {
if (diff < 0)
diff = i;
usX2Y->In04Last[i] = ((char*)usX2Y->In04Buf)[i];
}
}
}
if (0 <= diff) {
/* Append a snapshot to the ring and wake any waiting reader. */
int n = us428ctls->CtlSnapShotLast + 1;
if (n >= N_us428_ctl_BUFS || n < 0)
n = 0;
memcpy(us428ctls->CtlSnapShot + n, usX2Y->In04Buf, sizeof(us428ctls->CtlSnapShot[0]));
us428ctls->CtlSnapShotDiffersAt[n] = diff;
us428ctls->CtlSnapShotLast = n;
wake_up(&usX2Y->us428ctls_wait_queue_head);
}
}
/* A pending US04 sequence takes priority over p4out traffic. */
if (usX2Y->US04) {
if (0 == usX2Y->US04->submitted)
do {
err = usb_submit_urb(usX2Y->US04->urb[usX2Y->US04->submitted++], GFP_ATOMIC);
} while (!err && usX2Y->US04->submitted < usX2Y->US04->len);
} else
if (us428ctls && us428ctls->p4outLast >= 0 && us428ctls->p4outLast < N_us428_p4out_BUFS) {
if (us428ctls->p4outLast != us428ctls->p4outSent) {
/* Send the next unsent p4out entry on any idle AS04 urb. */
int j, send = us428ctls->p4outSent + 1;
if (send >= N_us428_p4out_BUFS)
send = 0;
for (j = 0; j < URBS_AsyncSeq && !err; ++j)
if (0 == usX2Y->AS04.urb[j]->status) {
struct us428_p4out *p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost.
usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->dev,
usb_sndbulkpipe(usX2Y->dev, 0x04), &p4out->val.vol,
p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5,
i_usX2Y_Out04Int, usX2Y);
err = usb_submit_urb(usX2Y->AS04.urb[j], GFP_ATOMIC);
us428ctls->p4outSent = send;
break;
}
}
}
if (err)
snd_printk(KERN_ERR "In04Int() usb_submit_urb err=%i\n", err);
/* Resubmit ourselves to keep the status poll running. */
urb->dev = usX2Y->dev;
usb_submit_urb(urb, GFP_ATOMIC);
}
/*
* Prepare some urbs
*/
/*
 * Allocate the shared transfer buffer and the asynchronous bulk OUT
 * urbs for pipe 4.
 *
 * Returns 0 on success or -ENOMEM.  The original version returned
 * -ENOMEM leaving any already-allocated urbs and the buffer behind
 * (usX2Y_unlinkSeq() only runs on disconnect), leaking them when the
 * caller aborts initialization; unwind fully on failure instead.
 */
int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y)
{
	int i;

	usX2Y->AS04.buffer = kmalloc(URB_DataLen_AsyncSeq * URBS_AsyncSeq,
				     GFP_KERNEL);
	if (NULL == usX2Y->AS04.buffer)
		return -ENOMEM;

	for (i = 0; i < URBS_AsyncSeq; ++i) {
		usX2Y->AS04.urb[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (NULL == usX2Y->AS04.urb[i])
			goto cleanup;
		usb_fill_bulk_urb(usX2Y->AS04.urb[i], usX2Y->dev,
				  usb_sndbulkpipe(usX2Y->dev, 0x04),
				  usX2Y->AS04.buffer + URB_DataLen_AsyncSeq * i, 0,
				  i_usX2Y_Out04Int, usX2Y);
	}
	return 0;

cleanup:
	/* Free everything allocated so far and clear the pointers so a
	 * later usX2Y_unlinkSeq() cannot free them a second time. */
	while (--i >= 0) {
		usb_free_urb(usX2Y->AS04.urb[i]);
		usX2Y->AS04.urb[i] = NULL;
	}
	kfree(usX2Y->AS04.buffer);
	usX2Y->AS04.buffer = NULL;
	return -ENOMEM;
}
int usX2Y_In04_init(struct usX2Ydev *usX2Y)
{
if (! (usX2Y->In04urb = usb_alloc_urb(0, GFP_KERNEL)))
return -ENOMEM;
if (! (usX2Y->In04Buf = kmalloc(21, GFP_KERNEL))) {
usb_free_urb(usX2Y->In04urb);
return -ENOMEM;
}
init_waitqueue_head(&usX2Y->In04WaitQueue);
usb_fill_int_urb(usX2Y->In04urb, usX2Y->dev, usb_rcvintpipe(usX2Y->dev, 0x4),
usX2Y->In04Buf, 21,
i_usX2Y_In04Int, usX2Y,
10);
return usb_submit_urb(usX2Y->In04urb, GFP_KERNEL);
}
/*
 * Kill and free all async-sequence urbs of *S and release the shared
 * transfer buffer.
 *
 * Fix: the original guard tested "S[i].urb", which indexes whole
 * snd_usX2Y_AsyncSeq structures and reads past the end of *S for
 * i > 0; the element actually used by the loop body is S->urb[i].
 */
static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S)
{
	int i;

	for (i = 0; i < URBS_AsyncSeq; ++i) {
		if (S->urb[i]) {
			usb_kill_urb(S->urb[i]);
			usb_free_urb(S->urb[i]);
			S->urb[i] = NULL;
		}
	}
	kfree(S->buffer);
	S->buffer = NULL;	/* guard against a second call */
}
/*
 * USB IDs handled by this driver: TASCAM (vendor 0x1604) US-428,
 * US-122 and US-224.
 */
static struct usb_device_id snd_usX2Y_usb_id_table[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x1604,
.idProduct = USB_ID_US428
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x1604,
.idProduct = USB_ID_US122
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x1604,
.idProduct = USB_ID_US224
},
{ /* terminator */ }
};
/*
 * Create an ALSA card for a newly attached device.
 *
 * Picks the first enabled, unused card slot, allocates the card with a
 * usX2Ydev private area, marks the slot used, and fills in the card's
 * identification strings.  Returns 0 and *cardp on success, -ENODEV if
 * no slot is free, or the snd_card_create() error.
 */
static int usX2Y_create_card(struct usb_device *device, struct snd_card **cardp)
{
int dev;
struct snd_card * card;
int err;
for (dev = 0; dev < SNDRV_CARDS; ++dev)
if (enable[dev] && !snd_usX2Y_card_used[dev])
break;
if (dev >= SNDRV_CARDS)
return -ENODEV;
err = snd_card_create(index[dev], id[dev], THIS_MODULE,
sizeof(struct usX2Ydev), &card);
if (err < 0)
return err;
/* Record the slot index in the private area and claim the slot. */
snd_usX2Y_card_used[usX2Y(card)->card_index = dev] = 1;
card->private_free = snd_usX2Y_card_private_free;
usX2Y(card)->dev = device;
init_waitqueue_head(&usX2Y(card)->prepare_wait_queue);
mutex_init(&usX2Y(card)->prepare_mutex);
INIT_LIST_HEAD(&usX2Y(card)->midi_list);
strcpy(card->driver, "USB "NAME_ALLCAPS"");
sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)",
card->shortname,
le16_to_cpu(device->descriptor.idVendor),
le16_to_cpu(device->descriptor.idProduct),
0,//us428(card)->usbmidi.ifnum,
usX2Y(card)->dev->bus->busnum, usX2Y(card)->dev->devnum
);
*cardp = card;
return 0;
}
/*
 * Device-level probe: verify the vendor/product IDs, create the card,
 * register the hwdep device and the card itself.
 *
 * Returns 0 with *cardp set on success; -EINVAL for an unsupported
 * device; otherwise the card-creation/registration error (the card is
 * freed on a registration failure).
 */
static int usX2Y_usb_probe(struct usb_device *device,
struct usb_interface *intf,
const struct usb_device_id *device_id,
struct snd_card **cardp)
{
int err;
struct snd_card * card;
*cardp = NULL;
if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 ||
(le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 &&
le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 &&
le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428))
return -EINVAL;
err = usX2Y_create_card(device, &card);
if (err < 0)
return err;
snd_card_set_dev(card, &intf->dev);
if ((err = usX2Y_hwdep_new(card, device)) < 0 ||
(err = snd_card_register(card)) < 0) {
/* snd_card_free() runs private_free, releasing the card slot. */
snd_card_free(card);
return err;
}
*cardp = card;
return 0;
}
/*
* new 2.5 USB kernel API
*/
/*
 * USB interface probe callback: build a card for the attached device
 * and stash it as the interface's driver data.
 */
static int snd_usX2Y_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_device *device = interface_to_usbdev(intf);
	struct snd_card *card;
	int err;

	err = usX2Y_usb_probe(device, intf, id, &card);
	if (err < 0)
		return err;
	dev_set_drvdata(&intf->dev, card);
	return 0;
}
/* USB interface disconnect callback: hand off to the device-level teardown. */
static void snd_usX2Y_disconnect(struct usb_interface *intf)
{
	struct usb_device *device = interface_to_usbdev(intf);
	void *card = usb_get_intfdata(intf);

	usX2Y_usb_disconnect(device, card);
}
MODULE_DEVICE_TABLE(usb, snd_usX2Y_usb_id_table);
/* USB driver glue: probe/disconnect plus the device-id table above. */
static struct usb_driver snd_usX2Y_usb_driver = {
.name = "snd-usb-usx2y",
.probe = snd_usX2Y_probe,
.disconnect = snd_usX2Y_disconnect,
.id_table = snd_usX2Y_usb_id_table,
};
/*
 * Card private_free callback: release the pipe-4 IN resources and the
 * shared-memory page, and return the card slot to the pool.
 */
static void snd_usX2Y_card_private_free(struct snd_card *card)
{
kfree(usX2Y(card)->In04Buf);
usb_free_urb(usX2Y(card)->In04urb);
if (usX2Y(card)->us428ctls_sharedmem)
snd_free_pages(usX2Y(card)->us428ctls_sharedmem, sizeof(*usX2Y(card)->us428ctls_sharedmem));
if (usX2Y(card)->card_index >= 0 && usX2Y(card)->card_index < SNDRV_CARDS)
snd_usX2Y_card_used[usX2Y(card)->card_index] = 0;
}
/*
 * Device-level disconnect: mark the chip gone, stop all pipe-4 traffic,
 * detach the card, release MIDI resources, wake any sleeper on the
 * shared-memory queue, then free the card (which runs private_free).
 */
static void usX2Y_usb_disconnect(struct usb_device *device, void* ptr)
{
if (ptr) {
struct snd_card *card = ptr;
struct usX2Ydev *usX2Y = usX2Y(card);
struct list_head *p;
usX2Y->chip_status = USX2Y_STAT_CHIP_HUP;
usX2Y_unlinkSeq(&usX2Y->AS04);
usb_kill_urb(usX2Y->In04urb);
snd_card_disconnect(card);
/* release the midi resources */
list_for_each(p, &usX2Y->midi_list) {
snd_usbmidi_disconnect(p);
}
if (usX2Y->us428ctls_sharedmem)
wake_up(&usX2Y->us428ctls_wait_queue_head);
snd_card_free(card);
}
}
module_usb_driver(snd_usX2Y_usb_driver);
| gpl-2.0 |
mujeebulhasan/kernel | fs/fscache/object-list.c | 8462 | 12220 | /* Global fscache object list maintainer and viewer
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/key.h>
#include <keys/user-type.h>
#include "internal.h"
/* Index of all live fscache objects, keyed by object address. */
static struct rb_root fscache_object_list;
static DEFINE_RWLOCK(fscache_object_list_lock);
/* Per-open state for /proc/fs/fscache/objects. */
struct fscache_objlist_data {
unsigned long config; /* display configuration */
#define FSCACHE_OBJLIST_CONFIG_KEY 0x00000001 /* show object keys */
#define FSCACHE_OBJLIST_CONFIG_AUX 0x00000002 /* show object auxdata */
#define FSCACHE_OBJLIST_CONFIG_COOKIE 0x00000004 /* show objects with cookies */
#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE 0x00000008 /* show objects without cookies */
#define FSCACHE_OBJLIST_CONFIG_BUSY 0x00000010 /* show busy objects */
#define FSCACHE_OBJLIST_CONFIG_IDLE 0x00000020 /* show idle objects */
#define FSCACHE_OBJLIST_CONFIG_PENDWR 0x00000040 /* show objects with pending writes */
#define FSCACHE_OBJLIST_CONFIG_NOPENDWR 0x00000080 /* show objects without pending writes */
#define FSCACHE_OBJLIST_CONFIG_READS 0x00000100 /* show objects with active reads */
#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */
#define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */
#define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without no events */
#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with work */
#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without work */
u8 buf[512]; /* key and aux data buffer; show() reads key into the first 400 bytes, aux into the rest */
};
/*
 * Add an object to the object list
 * - we use the address of the fscache_object structure as the key into the
 * tree, so insertion order follows address order
 */
void fscache_objlist_add(struct fscache_object *obj)
{
struct fscache_object *xobj;
struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
write_lock(&fscache_object_list_lock);
/* Standard rbtree descent; an object may only be inserted once. */
while (*p) {
parent = *p;
xobj = rb_entry(parent, struct fscache_object, objlist_link);
if (obj < xobj)
p = &(*p)->rb_left;
else if (obj > xobj)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&obj->objlist_link, parent, p);
rb_insert_color(&obj->objlist_link, &fscache_object_list);
write_unlock(&fscache_object_list_lock);
}
/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 * Removes the object from the global object list under the write lock.
 */
void fscache_object_destroy(struct fscache_object *obj)
{
write_lock(&fscache_object_list_lock);
BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
rb_erase(&obj->objlist_link, &fscache_object_list);
write_unlock(&fscache_object_list_lock);
}
EXPORT_SYMBOL(fscache_object_destroy);
/*
 * find the object in the tree on or after the specified index
 *
 * *_pos doubles as an encoded cursor: 0-2 select banner lines, larger
 * values are the address of the last object shown; ERR_PTR(-ENOENT)
 * cast to unsigned long marks end-of-sequence.
 */
static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
{
struct fscache_object *pobj, *obj = NULL, *minobj = NULL;
struct rb_node *p;
unsigned long pos;
if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
return NULL;
pos = *_pos;
/* banners (can't represent line 0 by pos 0 as that would involve
* returning a NULL pointer) */
if (pos == 0)
return (struct fscache_object *)(long)++(*_pos);
if (pos < 3)
return (struct fscache_object *)pos;
pobj = (struct fscache_object *)pos;
p = fscache_object_list.rb_node;
/* Find the lowest-addressed object >= pobj (objects are keyed by
* address, so a deleted cursor object is skipped gracefully). */
while (p) {
obj = rb_entry(p, struct fscache_object, objlist_link);
if (pobj < obj) {
if (!minobj || minobj > obj)
minobj = obj;
p = p->rb_left;
} else if (pobj > obj) {
p = p->rb_right;
} else {
minobj = obj;
break;
}
obj = NULL;
}
if (!minobj)
*_pos = (unsigned long) ERR_PTR(-ENOENT);
else if (minobj != obj)
*_pos = (unsigned long) minobj;
return minobj;
}
/*
 * set up the iterator to start reading from the first line
 * - takes the list read lock, released again in fscache_objlist_stop()
 */
static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
__acquires(&fscache_object_list_lock)
{
read_lock(&fscache_object_list_lock);
return fscache_objlist_lookup(_pos);
}
/*
 * move to the next line
 */
static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
{
(*_pos)++;
return fscache_objlist_lookup(_pos);
}
/*
 * clean up after reading
 * - drops the read lock taken in fscache_objlist_start()
 */
static void fscache_objlist_stop(struct seq_file *m, void *v)
__releases(&fscache_object_list_lock)
{
read_unlock(&fscache_object_list_lock);
}
/*
 * display an object
 * - v == 1 and v == 2 are the two banner lines (see the pos encoding in
 * fscache_objlist_lookup()); otherwise v is the object to print
 * - objects not matching the configured filter are silently skipped
 */
static int fscache_objlist_show(struct seq_file *m, void *v)
{
struct fscache_objlist_data *data = m->private;
struct fscache_object *obj = v;
unsigned long config = data->config;
uint16_t keylen, auxlen;
char _type[3], *type;
bool no_cookie;
u8 *buf = data->buf, *p;
/* First banner line: column headings. */
if ((unsigned long) v == 1) {
seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS"
" EM EV F S"
" | NETFS_COOKIE_DEF TY FL NETFS_DATA");
if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
FSCACHE_OBJLIST_CONFIG_AUX))
seq_puts(m, " ");
if (config & FSCACHE_OBJLIST_CONFIG_KEY)
seq_puts(m, "OBJECT_KEY");
if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
FSCACHE_OBJLIST_CONFIG_AUX)) ==
(FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
seq_puts(m, ", ");
if (config & FSCACHE_OBJLIST_CONFIG_AUX)
seq_puts(m, "AUX_DATA");
seq_puts(m, "\n");
return 0;
}
/* Second banner line: underlines. */
if ((unsigned long) v == 2) {
seq_puts(m, "======== ======== ==== ===== === === === == ====="
" == == = ="
" | ================ == == ================");
if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
FSCACHE_OBJLIST_CONFIG_AUX))
seq_puts(m, " ================");
seq_puts(m, "\n");
return 0;
}
/* filter out any unwanted objects */
#define FILTER(criterion, _yes, _no) \
do { \
unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \
unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \
if (criterion) { \
if (!(config & yes)) \
return 0; \
} else { \
if (!(config & no)) \
return 0; \
} \
} while(0)
/* config == ULONG_MAX (no key found) means "show everything". */
if (~config) {
FILTER(obj->cookie,
COOKIE, NOCOOKIE);
FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
obj->n_ops != 0 ||
obj->n_obj_ops != 0 ||
obj->flags ||
!list_empty(&obj->dependents),
BUSY, IDLE);
FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
PENDWR, NOPENDWR);
FILTER(atomic_read(&obj->n_reads),
READS, NOREADS);
FILTER(obj->events & obj->event_mask,
EVENTS, NOEVENTS);
FILTER(work_busy(&obj->work), WORK, NOWORK);
}
/* Fixed-width object statistics columns. */
seq_printf(m,
"%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1x | ",
obj->debug_id,
obj->parent ? obj->parent->debug_id : -1,
fscache_object_states_short[obj->state],
obj->n_children,
obj->n_ops,
obj->n_obj_ops,
obj->n_in_progress,
obj->n_exclusive,
atomic_read(&obj->n_reads),
obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
obj->events,
obj->flags,
work_busy(&obj->work));
no_cookie = true;
keylen = auxlen = 0;
if (obj->cookie) {
/* Recheck under the object lock: the cookie may be going away. */
spin_lock(&obj->lock);
if (obj->cookie) {
switch (obj->cookie->def->type) {
case 0:
type = "IX";
break;
case 1:
type = "DT";
break;
default:
sprintf(_type, "%02u",
obj->cookie->def->type);
type = _type;
break;
}
seq_printf(m, "%-16s %s %2lx %16p",
obj->cookie->def->name,
type,
obj->cookie->flags,
obj->cookie->netfs_data);
/* Key goes into the first 400 bytes of buf, aux after it. */
if (obj->cookie->def->get_key &&
config & FSCACHE_OBJLIST_CONFIG_KEY)
keylen = obj->cookie->def->get_key(
obj->cookie->netfs_data,
buf, 400);
if (obj->cookie->def->get_aux &&
config & FSCACHE_OBJLIST_CONFIG_AUX)
auxlen = obj->cookie->def->get_aux(
obj->cookie->netfs_data,
buf + keylen, 512 - keylen);
no_cookie = false;
}
spin_unlock(&obj->lock);
/* Hex-dump the key and aux data outside the lock. */
if (!no_cookie && (keylen > 0 || auxlen > 0)) {
seq_printf(m, " ");
for (p = buf; keylen > 0; keylen--)
seq_printf(m, "%02x", *p++);
if (auxlen > 0) {
if (config & FSCACHE_OBJLIST_CONFIG_KEY)
seq_printf(m, ", ");
for (; auxlen > 0; auxlen--)
seq_printf(m, "%02x", *p++);
}
}
}
if (no_cookie)
seq_printf(m, "<no_cookie>\n");
else
seq_printf(m, "\n");
return 0;
}
/* seq_file iterator for /proc/fs/fscache/objects. */
static const struct seq_operations fscache_objlist_ops = {
.start = fscache_objlist_start,
.stop = fscache_objlist_stop,
.next = fscache_objlist_next,
.show = fscache_objlist_show,
};
/*
 * get the configuration for filtering the list
 * - reads single-letter flags from the "fscache:objlist" user key; for
 * each show/hide pair where neither letter is given, both are enabled
 * - without CONFIG_KEYS, or if no key is found, everything is shown
 * (config = ULONG_MAX)
 */
static void fscache_objlist_config(struct fscache_objlist_data *data)
{
#ifdef CONFIG_KEYS
struct user_key_payload *confkey;
unsigned long config;
struct key *key;
const char *buf;
int len;
key = request_key(&key_type_user, "fscache:objlist", NULL);
if (IS_ERR(key))
goto no_config;
config = 0;
rcu_read_lock();
confkey = key->payload.data;
buf = confkey->data;
for (len = confkey->datalen - 1; len >= 0; len--) {
switch (buf[len]) {
case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY; break;
case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX; break;
case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE; break;
case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE; break;
case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY; break;
case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE; break;
case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR; break;
case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR; break;
case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS; break;
case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS; break;
case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK; break;
case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK; break;
}
}
rcu_read_unlock();
key_put(key);
/* A pair with neither side requested defaults to "show both". */
if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;
data->config = config;
return;
no_config:
#endif
data->config = ULONG_MAX;
}
/*
 * open "/proc/fs/fscache/objects" to provide a list of active objects
 * - can be configured by a user-defined key added to the caller's keyrings
 * - allocates the per-open data (filter config + key/aux buffer)
 */
static int fscache_objlist_open(struct inode *inode, struct file *file)
{
struct fscache_objlist_data *data;
struct seq_file *m;
int ret;
ret = seq_open(file, &fscache_objlist_ops);
if (ret < 0)
return ret;
m = file->private_data;
/* buffer for key extraction */
data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL);
if (!data) {
/* Undo seq_open() on allocation failure. */
seq_release(inode, file);
return -ENOMEM;
}
/* get the configuration key */
fscache_objlist_config(data);
m->private = data;
return 0;
}
/*
 * Release callback: free the per-open data allocated in
 * fscache_objlist_open(), then let seq_file clean itself up.
 */
static int fscache_objlist_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct fscache_objlist_data *data = m->private;

	m->private = NULL;
	kfree(data);
	return seq_release(inode, file);
}
/* File operations for /proc/fs/fscache/objects. */
const struct file_operations fscache_objlist_fops = {
.owner = THIS_MODULE,
.open = fscache_objlist_open,
.read = seq_read,
.llseek = seq_lseek,
.release = fscache_objlist_release,
};
| gpl-2.0 |
alexax66/LP-Kernel-a3ltexx | drivers/pcmcia/pxa2xx_mainstone.c | 9742 | 4364 | /*
* linux/drivers/pcmcia/pxa2xx_mainstone.c
*
* Mainstone PCMCIA specific routines.
*
* Created: May 12, 2004
* Author: Nicolas Pitre
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <pcmcia/ss.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <mach/pxa2xx-regs.h>
#include <mach/mainstone.h>
#include "soc_common.h"
/*
 * Describe the static wiring of each Mainstone socket: its ready IRQ
 * plus the card-detect and BVD1/STSCHG status interrupts.  Always
 * succeeds.
 */
static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	switch (skt->nr) {
	case 0:
		skt->socket.pci_irq = MAINSTONE_S0_IRQ;
		skt->stat[SOC_STAT_CD].irq = MAINSTONE_S0_CD_IRQ;
		skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD";
		skt->stat[SOC_STAT_BVD1].irq = MAINSTONE_S0_STSCHG_IRQ;
		skt->stat[SOC_STAT_BVD1].name = "PCMCIA0 STSCHG";
		break;
	default:
		skt->socket.pci_irq = MAINSTONE_S1_IRQ;
		skt->stat[SOC_STAT_CD].irq = MAINSTONE_S1_CD_IRQ;
		skt->stat[SOC_STAT_CD].name = "PCMCIA1 CD";
		skt->stat[SOC_STAT_BVD1].irq = MAINSTONE_S1_STSCHG_IRQ;
		skt->stat[SOC_STAT_BVD1].name = "PCMCIA1 STSCHG";
		break;
	}
	return 0;
}
/* Last sampled status register value per socket, for edge detection below. */
static unsigned long mst_pcmcia_status[2];
/*
 * Sample the socket's status register and translate the bits into the
 * generic pcmcia_state fields (all status lines are active-low).
 */
static void mst_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
unsigned long status, flip;
status = (skt->nr == 0) ? MST_PCMCIA0 : MST_PCMCIA1;
flip = (status ^ mst_pcmcia_status[skt->nr]) & MST_PCMCIA_nSTSCHG_BVD1;
/*
* Workaround for STSCHG which can't be deasserted:
* We therefore disable/enable corresponding IRQs
* as needed to avoid IRQ locks.
*/
if (flip) {
mst_pcmcia_status[skt->nr] = status;
if (status & MST_PCMCIA_nSTSCHG_BVD1)
enable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ
: MAINSTONE_S1_STSCHG_IRQ );
else
disable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ
: MAINSTONE_S1_STSCHG_IRQ );
}
state->detect = (status & MST_PCMCIA_nCD) ? 0 : 1;
state->ready = (status & MST_PCMCIA_nIRQ) ? 1 : 0;
state->bvd1 = (status & MST_PCMCIA_nSTSCHG_BVD1) ? 1 : 0;
state->bvd2 = (status & MST_PCMCIA_nSPKR_BVD2) ? 1 : 0;
state->vs_3v = (status & MST_PCMCIA_nVS1) ? 0 : 1;
state->vs_Xv = (status & MST_PCMCIA_nVS2) ? 0 : 1;
}
/*
 * Apply the requested power (Vcc/Vpp in tenths of a volt) and reset
 * state to the socket's control register.  Returns 0 on success or -1
 * for an unsupported voltage combination or socket number.
 */
static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
unsigned long power = 0;
int ret = 0;
switch (state->Vcc) {
case 0: power |= MST_PCMCIA_PWR_VCC_0; break;
case 33: power |= MST_PCMCIA_PWR_VCC_33; break;
case 50: power |= MST_PCMCIA_PWR_VCC_50; break;
default:
printk(KERN_ERR "%s(): bad Vcc %u\n",
__func__, state->Vcc);
ret = -1;
}
switch (state->Vpp) {
case 0: power |= MST_PCMCIA_PWR_VPP_0; break;
case 120: power |= MST_PCMCIA_PWR_VPP_120; break;
default:
/* Vpp may also track Vcc; anything else is unsupported. */
if(state->Vpp == state->Vcc) {
power |= MST_PCMCIA_PWR_VPP_VCC;
} else {
printk(KERN_ERR "%s(): bad Vpp %u\n",
__func__, state->Vpp);
ret = -1;
}
}
if (state->flags & SS_RESET)
power |= MST_PCMCIA_RESET;
/* Write the assembled control word to the selected socket. */
switch (skt->nr) {
case 0: MST_PCMCIA0 = power; break;
case 1: MST_PCMCIA1 = power; break;
default: ret = -1;
}
return ret;
}
/* Board-specific callbacks handed to the pxa2xx-pcmcia platform driver. */
static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = mst_pcmcia_hw_init,
.socket_state = mst_pcmcia_socket_state,
.configure_socket = mst_pcmcia_configure_socket,
.nr = 2,
};
static struct platform_device *mst_pcmcia_device;
/*
 * Register the pxa2xx-pcmcia platform device carrying the Mainstone
 * low-level ops as platform data.  Returns 0 on success or a negative
 * errno; the device reference is dropped on failure.
 */
static int __init mst_pcmcia_init(void)
{
int ret;
if (!machine_is_mainstone())
return -ENODEV;
mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!mst_pcmcia_device)
return -ENOMEM;
ret = platform_device_add_data(mst_pcmcia_device, &mst_pcmcia_ops,
sizeof(mst_pcmcia_ops));
if (ret == 0)
ret = platform_device_add(mst_pcmcia_device);
if (ret)
platform_device_put(mst_pcmcia_device);
return ret;
}
/* Unregister (and thereby free) the platform device added above. */
static void __exit mst_pcmcia_exit(void)
{
platform_device_unregister(mst_pcmcia_device);
}
/* fs_initcall: must run after the pxa2xx-pcmcia driver is registered. */
fs_initcall(mst_pcmcia_init);
module_exit(mst_pcmcia_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.