repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
scue/android_kernel_lenovo_stuttgart-base | drivers/misc/sgi-xp/xpc_channel.c | 12360 | 28240 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) channel support.
*
* This is the part of XPC that manages the channels and
* sends/receives messages across them to/from other partitions.
*
*/
#include <linux/device.h>
#include "xpc.h"
/*
* Process a connect message from a remote partition.
*
* Note: xpc_process_connect() is expecting to be called with the
* spin_lock_irqsave held and will leave it locked upon return.
*/
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
enum xp_retval ret;
DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) {
/* nothing more to do for now */
return;
}
DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
if (!(ch->flags & XPC_C_SETUP)) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
ret = xpc_arch_ops.setup_msg_structures(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
if (ret != xpSuccess)
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
else
ch->flags |= XPC_C_SETUP;
if (ch->flags & XPC_C_DISCONNECTING)
return;
}
if (!(ch->flags & XPC_C_OPENREPLY)) {
ch->flags |= XPC_C_OPENREPLY;
xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_ROPENREPLY))
return;
if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
}
if (!(ch->flags & XPC_C_ROPENCOMPLETE))
return;
dev_info(xpc_chan, "channel %d to partition %d connected\n",
ch->number, ch->partid);
ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
}
/*
* spin_lock_irqsave() is expected to be held on entry.
*/
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
DBUG_ON(!spin_is_locked(&ch->lock));
if (!(ch->flags & XPC_C_DISCONNECTING))
return;
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
/* make sure all activity has settled down first */
if (atomic_read(&ch->kthreads_assigned) > 0 ||
atomic_read(&ch->references) > 0) {
return;
}
DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
if (xpc_arch_ops.partition_engaged(ch->partid))
return;
} else {
/* as long as the other side is up do the full protocol */
if (!(ch->flags & XPC_C_RCLOSEREQUEST))
return;
if (!(ch->flags & XPC_C_CLOSEREPLY)) {
ch->flags |= XPC_C_CLOSEREPLY;
xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_RCLOSEREPLY))
return;
}
/* wake those waiting for notify completion */
if (atomic_read(&ch->n_to_notify) > 0) {
/* we do callout while holding ch->lock, callout can't block */
xpc_arch_ops.notify_senders_of_disconnect(ch);
}
/* both sides are disconnected now */
if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
xpc_disconnect_callout(ch, xpDisconnected);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
/* it's now safe to free the channel's message queues */
xpc_arch_ops.teardown_msg_structures(ch);
ch->func = NULL;
ch->key = NULL;
ch->entry_size = 0;
ch->local_nentries = 0;
ch->remote_nentries = 0;
ch->kthreads_assigned_limit = 0;
ch->kthreads_idle_limit = 0;
/*
* Mark the channel disconnected and clear all other flags, including
* XPC_C_SETUP (because of call to
* xpc_arch_ops.teardown_msg_structures()) but not including
* XPC_C_WDISCONNECT (if it was set).
*/
ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
atomic_dec(&part->nchannels_active);
if (channel_was_connected) {
dev_info(xpc_chan, "channel %d to partition %d disconnected, "
"reason=%d\n", ch->number, ch->partid, ch->reason);
}
if (ch->flags & XPC_C_WDISCONNECT) {
/* we won't lose the CPU since we're holding ch->lock */
complete(&ch->wdisconnect_wait);
} else if (ch->delayed_chctl_flags) {
if (part->act_state != XPC_P_AS_DEACTIVATING) {
/* time to take action on any delayed chctl flags */
spin_lock(&part->chctl_lock);
part->chctl.flags[ch->number] |=
ch->delayed_chctl_flags;
spin_unlock(&part->chctl_lock);
}
ch->delayed_chctl_flags = 0;
}
}
/*
* Process a change in the channel's remote connection state.
*/
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
u8 chctl_flags)
{
unsigned long irq_flags;
struct xpc_openclose_args *args =
&part->remote_openclose_args[ch_number];
struct xpc_channel *ch = &part->channels[ch_number];
enum xp_retval reason;
enum xp_retval ret;
int create_kthread = 0;
spin_lock_irqsave(&ch->lock, irq_flags);
again:
if ((ch->flags & XPC_C_DISCONNECTED) &&
(ch->flags & XPC_C_WDISCONNECT)) {
/*
* Delay processing chctl flags until thread waiting disconnect
* has had a chance to see that the channel is disconnected.
*/
ch->delayed_chctl_flags |= chctl_flags;
goto out;
}
if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {
dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
"from partid=%d, channel=%d\n", args->reason,
ch->partid, ch->number);
/*
* If RCLOSEREQUEST is set, we're probably waiting for
* RCLOSEREPLY. We should find it and a ROPENREQUEST packed
* with this RCLOSEREQUEST in the chctl_flags.
*/
if (ch->flags & XPC_C_RCLOSEREQUEST) {
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
ch->flags |= XPC_C_RCLOSEREPLY;
/* both sides have finished disconnecting */
xpc_process_disconnect(ch, &irq_flags);
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
goto again;
}
if (ch->flags & XPC_C_DISCONNECTED) {
if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
if (part->chctl.flags[ch_number] &
XPC_CHCTL_OPENREQUEST) {
DBUG_ON(ch->delayed_chctl_flags != 0);
spin_lock(&part->chctl_lock);
part->chctl.flags[ch_number] |=
XPC_CHCTL_CLOSEREQUEST;
spin_unlock(&part->chctl_lock);
}
goto out;
}
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&part->nchannels_active);
ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
}
chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
XPC_CHCTL_OPENCOMPLETE);
/*
* The meaningful CLOSEREQUEST connection state fields are:
* reason = reason connection is to be closed
*/
ch->flags |= XPC_C_RCLOSEREQUEST;
if (!(ch->flags & XPC_C_DISCONNECTING)) {
reason = args->reason;
if (reason <= xpSuccess || reason > xpUnknownReason)
reason = xpUnknownReason;
else if (reason == xpUnregistering)
reason = xpOtherUnregistering;
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
goto out;
}
xpc_process_disconnect(ch, &irq_flags);
}
if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {
dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
"%d, channel=%d\n", ch->partid, ch->number);
if (ch->flags & XPC_C_DISCONNECTED) {
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
goto out;
}
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
if (part->chctl.flags[ch_number] &
XPC_CHCTL_CLOSEREQUEST) {
DBUG_ON(ch->delayed_chctl_flags != 0);
spin_lock(&part->chctl_lock);
part->chctl.flags[ch_number] |=
XPC_CHCTL_CLOSEREPLY;
spin_unlock(&part->chctl_lock);
}
goto out;
}
ch->flags |= XPC_C_RCLOSEREPLY;
if (ch->flags & XPC_C_CLOSEREPLY) {
/* both sides have finished disconnecting */
xpc_process_disconnect(ch, &irq_flags);
}
}
if (chctl_flags & XPC_CHCTL_OPENREQUEST) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
"local_nentries=%d) received from partid=%d, "
"channel=%d\n", args->entry_size, args->local_nentries,
ch->partid, ch->number);
if (part->act_state == XPC_P_AS_DEACTIVATING ||
(ch->flags & XPC_C_ROPENREQUEST)) {
goto out;
}
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
goto out;
}
DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
XPC_C_OPENREQUEST)));
DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_OPENREPLY | XPC_C_CONNECTED));
/*
* The meaningful OPENREQUEST connection state fields are:
* entry_size = size of channel's messages in bytes
* local_nentries = remote partition's local_nentries
*/
if (args->entry_size == 0 || args->local_nentries == 0) {
/* assume OPENREQUEST was delayed by mistake */
goto out;
}
ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
ch->remote_nentries = args->local_nentries;
if (ch->flags & XPC_C_OPENREQUEST) {
if (args->entry_size != ch->entry_size) {
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
goto out;
}
} else {
ch->entry_size = args->entry_size;
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&part->nchannels_active);
}
xpc_process_connect(ch, &irq_flags);
}
if (chctl_flags & XPC_CHCTL_OPENREPLY) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
"0x%lx, local_nentries=%d, remote_nentries=%d) "
"received from partid=%d, channel=%d\n",
args->local_msgqueue_pa, args->local_nentries,
args->remote_nentries, ch->partid, ch->number);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
goto out;
if (!(ch->flags & XPC_C_OPENREQUEST)) {
XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
goto out;
}
DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
DBUG_ON(ch->flags & XPC_C_CONNECTED);
/*
* The meaningful OPENREPLY connection state fields are:
* local_msgqueue_pa = physical address of remote
* partition's local_msgqueue
* local_nentries = remote partition's local_nentries
* remote_nentries = remote partition's remote_nentries
*/
DBUG_ON(args->local_msgqueue_pa == 0);
DBUG_ON(args->local_nentries == 0);
DBUG_ON(args->remote_nentries == 0);
ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
args->local_msgqueue_pa);
if (ret != xpSuccess) {
XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
goto out;
}
ch->flags |= XPC_C_ROPENREPLY;
if (args->local_nentries < ch->remote_nentries) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
"remote_nentries=%d, old remote_nentries=%d, "
"partid=%d, channel=%d\n",
args->local_nentries, ch->remote_nentries,
ch->partid, ch->number);
ch->remote_nentries = args->local_nentries;
}
if (args->remote_nentries < ch->local_nentries) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
"local_nentries=%d, old local_nentries=%d, "
"partid=%d, channel=%d\n",
args->remote_nentries, ch->local_nentries,
ch->partid, ch->number);
ch->local_nentries = args->remote_nentries;
}
xpc_process_connect(ch, &irq_flags);
}
if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {
dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
"partid=%d, channel=%d\n", ch->partid, ch->number);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
goto out;
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_OPENREPLY)) {
XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
goto out;
}
DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
ch->flags |= XPC_C_ROPENCOMPLETE;
xpc_process_connect(ch, &irq_flags);
create_kthread = 1;
}
out:
spin_unlock_irqrestore(&ch->lock, irq_flags);
if (create_kthread)
xpc_create_kthreads(ch, 1, 0);
}
/*
* Attempt to establish a channel connection to a remote partition.
*/
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
unsigned long irq_flags;
struct xpc_registration *registration = &xpc_registrations[ch->number];
if (mutex_trylock(&registration->mutex) == 0)
return xpRetry;
if (!XPC_CHANNEL_REGISTERED(ch->number)) {
mutex_unlock(&registration->mutex);
return xpUnregistered;
}
spin_lock_irqsave(&ch->lock, irq_flags);
DBUG_ON(ch->flags & XPC_C_CONNECTED);
DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
if (ch->flags & XPC_C_DISCONNECTING) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
mutex_unlock(&registration->mutex);
return ch->reason;
}
/* add info from the channel connect registration to the channel */
ch->kthreads_assigned_limit = registration->assigned_limit;
ch->kthreads_idle_limit = registration->idle_limit;
DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
ch->func = registration->func;
DBUG_ON(registration->func == NULL);
ch->key = registration->key;
ch->local_nentries = registration->nentries;
if (ch->flags & XPC_C_ROPENREQUEST) {
if (registration->entry_size != ch->entry_size) {
/* the local and remote sides aren't the same */
/*
* Because XPC_DISCONNECT_CHANNEL() can block we're
* forced to up the registration sema before we unlock
* the channel lock. But that's okay here because we're
* done with the part that required the registration
* sema. XPC_DISCONNECT_CHANNEL() requires that the
* channel lock be locked and will unlock and relock
* the channel lock as needed.
*/
mutex_unlock(&registration->mutex);
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpUnequalMsgSizes;
}
} else {
ch->entry_size = registration->entry_size;
XPC_SET_REASON(ch, 0, 0);
ch->flags &= ~XPC_C_DISCONNECTED;
atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
}
mutex_unlock(&registration->mutex);
/* initiate the connection */
ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);
xpc_process_connect(ch, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpSuccess;
}
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
unsigned long irq_flags;
union xpc_channel_ctl_flags chctl;
struct xpc_channel *ch;
int ch_number;
u32 ch_flags;
chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);
/*
* Initiate channel connections for registered channels.
*
* For each connected channel that has pending messages activate idle
* kthreads and/or create new kthreads as needed.
*/
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
/*
* Process any open or close related chctl flags, and then deal
* with connecting or disconnecting the channel as required.
*/
if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
xpc_process_openclose_chctl_flags(part, ch_number,
chctl.flags[ch_number]);
}
ch_flags = ch->flags; /* need an atomic snapshot of flags */
if (ch_flags & XPC_C_DISCONNECTING) {
spin_lock_irqsave(&ch->lock, irq_flags);
xpc_process_disconnect(ch, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
continue;
}
if (part->act_state == XPC_P_AS_DEACTIVATING)
continue;
if (!(ch_flags & XPC_C_CONNECTED)) {
if (!(ch_flags & XPC_C_OPENREQUEST)) {
DBUG_ON(ch_flags & XPC_C_SETUP);
(void)xpc_connect_channel(ch);
}
continue;
}
/*
* Process any message related chctl flags, this may involve
* the activation of kthreads to deliver any pending messages
* sent from the other partition.
*/
if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
}
}
/*
* XPC's heartbeat code calls this function to inform XPC that a partition is
* going down. XPC responds by tearing down the XPartition Communication
* infrastructure used for the just downed partition.
*
* XPC's heartbeat code will never call this function and xpc_partition_up()
* at the same time. Nor will it ever make multiple calls to either function
* at the same time.
*/
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
unsigned long irq_flags;
int ch_number;
struct xpc_channel *ch;
dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
XPC_PARTID(part), reason);
if (!xpc_part_ref(part)) {
/* infrastructure for this partition isn't currently set up */
return;
}
/* disconnect channels associated with the partition going down */
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
spin_lock_irqsave(&ch->lock, irq_flags);
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_msgqueue_deref(ch);
}
xpc_wakeup_channel_mgr(part);
xpc_part_deref(part);
}
/*
* Called by XP at the time of channel connection registration to cause
* XPC to establish connections to all currently active partitions.
*/
void
xpc_initiate_connect(int ch_number)
{
short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
ch = &part->channels[ch_number];
/*
* Initiate the establishment of a connection on the
* newly registered channel to the remote partition.
*/
xpc_wakeup_channel_mgr(part);
xpc_part_deref(part);
}
}
}
void
xpc_connected_callout(struct xpc_channel *ch)
{
/* let the registerer know that a connection has been established */
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
ch->func(xpConnected, ch->partid, ch->number,
(void *)(u64)ch->local_nentries, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
}
}
/*
* Called by XP at the time of channel connection unregistration to cause
* XPC to teardown all current connections for the specified channel.
*
* Before returning xpc_initiate_disconnect() will wait until all connections
* on the specified channel have been closed/torn down. So the caller can be
* assured that they will not be receiving any more callouts from XPC to the
* function they registered via xpc_connect().
*
* Arguments:
*
* ch_number - channel # to unregister.
*/
void
xpc_initiate_disconnect(int ch_number)
{
unsigned long irq_flags;
short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
/* initiate the channel disconnect for every active partition */
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
spin_lock_irqsave(&ch->lock, irq_flags);
if (!(ch->flags & XPC_C_DISCONNECTED)) {
ch->flags |= XPC_C_WDISCONNECT;
XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
&irq_flags);
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_msgqueue_deref(ch);
xpc_part_deref(part);
}
}
xpc_disconnect_wait(ch_number);
}
/*
* To disconnect a channel, and reflect it back to all who may be waiting.
*
* An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
* xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
* xpc_disconnect_wait().
*
* THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
*/
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
enum xp_retval reason, unsigned long *irq_flags)
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
DBUG_ON(!spin_is_locked(&ch->lock));
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return;
DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
reason, line, ch->partid, ch->number);
XPC_SET_REASON(ch, reason, line);
ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
/* some of these may not have been set */
ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_CONNECTING | XPC_C_CONNECTED);
xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);
if (channel_was_connected)
ch->flags |= XPC_C_WASCONNECTED;
spin_unlock_irqrestore(&ch->lock, *irq_flags);
/* wake all idle kthreads so they can exit */
if (atomic_read(&ch->kthreads_idle) > 0) {
wake_up_all(&ch->idle_wq);
} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
/* start a kthread that will do the xpDisconnecting callout */
xpc_create_kthreads(ch, 1, 1);
}
/* wake those waiting to allocate an entry from the local msg queue */
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
/*
* Let the channel's registerer know that the channel is being
* disconnected. We don't want to do this if the registerer was never
* informed of a connection being made.
*/
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
"channel=%d\n", reason, ch->partid, ch->number);
ch->func(reason, ch->partid, ch->number, NULL, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
"channel=%d\n", reason, ch->partid, ch->number);
}
}
/*
* Wait for a message entry to become available for the specified channel,
* but don't wait any longer than 1 jiffy.
*/
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
enum xp_retval ret;
if (ch->flags & XPC_C_DISCONNECTING) {
DBUG_ON(ch->reason == xpInterrupted);
return ch->reason;
}
atomic_inc(&ch->n_on_msg_allocate_wq);
ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
atomic_dec(&ch->n_on_msg_allocate_wq);
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
DBUG_ON(ch->reason == xpInterrupted);
} else if (ret == 0) {
ret = xpTimeout;
} else {
ret = xpInterrupted;
}
return ret;
}
/*
* Send a message that contains the user's payload on the specified channel
* connected to the specified partition.
*
* NOTE that this routine can sleep waiting for a message entry to become
* available. To not sleep, pass in the XPC_NOWAIT flag.
*
* Once sent, this routine will not wait for the message to be received, nor
* will notification be given when it does happen.
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # to send message on.
* flags - see xp.h for valid flags.
* payload - pointer to the payload which is to be sent.
* payload_size - size of the payload in bytes.
*/
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
u16 payload_size)
{
struct xpc_partition *part = &xpc_partitions[partid];
enum xp_retval ret = xpUnknownReason;
dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
partid, ch_number);
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(payload == NULL);
if (xpc_part_ref(part)) {
ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
flags, payload, payload_size, 0, NULL, NULL);
xpc_part_deref(part);
}
return ret;
}
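/*
 * Illustrative sketch (not part of the original file): a registerer of
 * channel 0 could push a payload to partition 1 roughly as follows. The
 * struct name is made up for the example; XPC_NOWAIT is one of the flags
 * documented in xp.h and makes the call return instead of sleeping when no
 * message entry is currently available.
 *
 * struct my_msg msg = { ... };
 * enum xp_retval ret;
 *
 * ret = xpc_initiate_send(1, 0, XPC_NOWAIT, &msg, sizeof(msg));
 * if (ret != xpSuccess)
 * pr_debug("send failed, reason=%d\n", ret);
 */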
/*
* Send a message that contains the user's payload on the specified channel
* connected to the specified partition.
*
* NOTE that this routine can sleep waiting for a message entry to become
* available. To not sleep, pass in the XPC_NOWAIT flag.
*
* This routine will not wait for the message to be sent or received.
*
* Once the remote end of the channel has received the message, the function
* passed as an argument to xpc_initiate_send_notify() will be called. This
* allows the sender to free up or re-use any buffers referenced by the
* message, but does NOT mean the message has been processed at the remote
* end by a receiver.
*
* If this routine returns an error, the caller's function will NOT be called.
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # to send message on.
* flags - see xp.h for valid flags.
* payload - pointer to the payload which is to be sent.
* payload_size - size of the payload in bytes.
* func - function to call with asynchronous notification of message
* receipt. THIS FUNCTION MUST BE NON-BLOCKING.
* key - user-defined key to be passed to the function when it's called.
*/
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
u16 payload_size, xpc_notify_func func, void *key)
{
struct xpc_partition *part = &xpc_partitions[partid];
enum xp_retval ret = xpUnknownReason;
dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
partid, ch_number);
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
DBUG_ON(payload == NULL);
DBUG_ON(func == NULL);
if (xpc_part_ref(part)) {
ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
flags, payload, payload_size, XPC_N_CALL, func, key);
xpc_part_deref(part);
}
return ret;
}
/*
* Deliver a message's payload to its intended recipient.
*/
void
xpc_deliver_payload(struct xpc_channel *ch)
{
void *payload;
payload = xpc_arch_ops.get_deliverable_payload(ch);
if (payload != NULL) {
/*
* This ref is taken to protect the payload itself from being
* freed before the user is finished with it, which the user
* indicates by calling xpc_initiate_received().
*/
xpc_msgqueue_ref(ch);
atomic_inc(&ch->kthreads_active);
if (ch->func != NULL) {
dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
"partid=%d channel=%d\n", payload, ch->partid,
ch->number);
/* deliver the message to its intended recipient */
ch->func(xpMsgReceived, ch->partid, ch->number, payload,
ch->key);
dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
"partid=%d channel=%d\n", payload, ch->partid,
ch->number);
}
atomic_dec(&ch->kthreads_active);
}
}
/*
* Acknowledge receipt of a delivered message's payload.
*
* This function, although called by users, does not call xpc_part_ref() to
* ensure that the partition infrastructure is in place. It relies on the
* fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
*
* Arguments:
*
* partid - ID of partition to which the channel is connected.
* ch_number - channel # message received on.
* payload - pointer to the payload area allocated via
* xpc_initiate_send() or xpc_initiate_send_notify().
*/
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number];
xpc_arch_ops.received_payload(ch, payload);
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
xpc_msgqueue_deref(ch);
}
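/*
 * Illustrative sketch (not part of the original file): the delivery/ack
 * handshake as seen from a registerer's channel function. The names
 * my_channel_func and process_payload are hypothetical; the function is
 * invoked by xpc_deliver_payload() above with reason == xpMsgReceived and
 * must hand the payload back via xpc_initiate_received() when done with it.
 *
 * static void
 * my_channel_func(enum xp_retval reason, short partid, int ch_number,
 * void *payload, void *key)
 * {
 * if (reason != xpMsgReceived)
 * return;
 * process_payload(payload);
 * xpc_initiate_received(partid, ch_number, payload);
 * }
 */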
| gpl-2.0 |
Elite-Kernels/HTC-10 | drivers/scsi/ufs/ufs_quirks.c | 73 | 3120 | /*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "ufshcd.h"
#include "ufs_quirks.h"
static struct ufs_card_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_HYNIX, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
END_FIX
};
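/*
 * Illustrative sketch (not part of the original table): a new deviation is
 * added as one more UFS_FIX() entry ahead of END_FIX, e.g. (a hypothetical
 * combination, reusing identifiers already present in this file):
 *
 * UFS_FIX(UFS_VENDOR_HYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
 *
 * ufs_advertise_fixup_device() below then ORs the quirk into
 * hba->dev_quirks whenever the manufacturer id and model string match.
 */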
static int ufs_get_device_info(struct ufs_hba *hba,
struct ufs_card_info *card_data)
{
int err;
u8 model_index;
u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1];
u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
err = ufshcd_read_device_desc(hba, desc_buf,
QUERY_DESC_DEVICE_MAX_SIZE);
if (err)
goto out;
/*
* getting vendor (manufacturerID) and Bank Index in big endian
* format
*/
card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE);
err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
if (err)
goto out;
str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
/* Null terminate the model string */
card_data->model[MAX_MODEL_LEN] = '\0';
out:
return err;
}
void ufs_advertise_fixup_device(struct ufs_hba *hba)
{
int err;
struct ufs_card_fix *f;
struct ufs_card_info card_data;
card_data.wmanufacturerid = 0;
card_data.model = kmalloc(MAX_MODEL_LEN + 1, GFP_KERNEL);
if (!card_data.model)
goto out;
/* get device data*/
err = ufs_get_device_info(hba, &card_data);
if (err) {
dev_err(hba->dev, "%s: Failed getting device info\n", __func__);
goto out;
}
for (f = ufs_fixups; f->quirk; f++) {
/* if same wmanufacturerid */
if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
(f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
/* and same model */
(STR_PRFX_EQUAL(f->card.model, card_data.model) ||
!strcmp(f->card.model, UFS_ANY_MODEL)))
/* update quirks */
hba->dev_quirks |= f->quirk;
}
out:
kfree(card_data.model);
}
| gpl-2.0 |
pgielda/imx6-linux | drivers/thermal/device_cooling.c | 73 | 3668 | /*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/err.h>
#include <linux/slab.h>
struct devfreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int devfreq_state;
};
static DEFINE_IDR(devfreq_idr);
static DEFINE_MUTEX(devfreq_cooling_lock);
#define MAX_STATE 1
static BLOCKING_NOTIFIER_HEAD(devfreq_cooling_chain_head);
int register_devfreq_cooling_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(
&devfreq_cooling_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_devfreq_cooling_notifier);
int unregister_devfreq_cooling_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(
&devfreq_cooling_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_devfreq_cooling_notifier);
static int devfreq_cooling_notifier_call_chain(unsigned long val)
{
return (blocking_notifier_call_chain(
&devfreq_cooling_chain_head, val, NULL)
== NOTIFY_BAD) ? -EINVAL : 0;
}
static int devfreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct devfreq_cooling_device *devfreq_device = cdev->devdata;
int ret;
ret = devfreq_cooling_notifier_call_chain(state);
if (ret)
return -EINVAL;
devfreq_device->devfreq_state = state;
return 0;
}
static int devfreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
*state = MAX_STATE;
return 0;
}
static int devfreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct devfreq_cooling_device *devfreq_device = cdev->devdata;
*state = devfreq_device->devfreq_state;
return 0;
}
static struct thermal_cooling_device_ops const devfreq_cooling_ops = {
.get_max_state = devfreq_get_max_state,
.get_cur_state = devfreq_get_cur_state,
.set_cur_state = devfreq_set_cur_state,
};
static int get_idr(struct idr *idr, int *id)
{
int ret;
mutex_lock(&devfreq_cooling_lock);
ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&devfreq_cooling_lock);
if (unlikely(ret < 0))
return ret;
*id = ret;
return 0;
}
static void release_idr(struct idr *idr, int id)
{
mutex_lock(&devfreq_cooling_lock);
idr_remove(idr, id);
mutex_unlock(&devfreq_cooling_lock);
}
struct thermal_cooling_device *devfreq_cooling_register(void)
{
struct thermal_cooling_device *cool_dev;
struct devfreq_cooling_device *devfreq_dev = NULL;
char dev_name[THERMAL_NAME_LENGTH];
int ret = 0;
devfreq_dev = kzalloc(sizeof(struct devfreq_cooling_device),
GFP_KERNEL);
if (!devfreq_dev)
return ERR_PTR(-ENOMEM);
ret = get_idr(&devfreq_idr, &devfreq_dev->id);
if (ret) {
kfree(devfreq_dev);
return ERR_PTR(-EINVAL);
}
snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d",
devfreq_dev->id);
cool_dev = thermal_cooling_device_register(dev_name, devfreq_dev,
&devfreq_cooling_ops);
if (!cool_dev) {
release_idr(&devfreq_idr, devfreq_dev->id);
kfree(devfreq_dev);
return ERR_PTR(-EINVAL);
}
devfreq_dev->cool_dev = cool_dev;
devfreq_dev->devfreq_state = 0;
return cool_dev;
}
EXPORT_SYMBOL_GPL(devfreq_cooling_register);
void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
struct devfreq_cooling_device *devfreq_dev = cdev->devdata;
thermal_cooling_device_unregister(devfreq_dev->cool_dev);
release_idr(&devfreq_idr, devfreq_dev->id);
kfree(devfreq_dev);
}
EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
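/*
 * Illustrative sketch (not part of the original file): a devfreq driver that
 * wants to throttle itself when the cooling state is raised can hook the
 * notifier chain exported above. The callback and the throttling helper are
 * hypothetical; "state" is the value passed to devfreq_set_cur_state()
 * (0 or MAX_STATE in this driver).
 *
 * static int my_cooling_notify(struct notifier_block *nb,
 * unsigned long state, void *data)
 * {
 * my_limit_frequency(state != 0);
 * return NOTIFY_OK;
 * }
 *
 * static struct notifier_block my_cooling_nb = {
 * .notifier_call = my_cooling_notify,
 * };
 *
 * register_devfreq_cooling_notifier(&my_cooling_nb);
 */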
| gpl-2.0 |
smksyj/linux_modified_mlock | drivers/base/power/qos.c | 73 | 24146 | /*
* Devices PM QoS constraints management
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This module exposes the interface to kernel space for specifying
* per-device PM QoS dependencies. It provides infrastructure for registration
* of:
*
* Dependents on a QoS value : register requests
* Watchers of QoS value : get notified when target QoS value changes
*
* This QoS design is best effort based. Dependents register their QoS needs.
* Watchers register to keep track of the current QoS needs of the system.
* Watchers can register different types of notification callbacks:
* . a per-device notification callback using the dev_pm_qos_*_notifier API.
* The notification chain data is stored in the per-device constraint
* data struct.
* . a system-wide notification callback using the dev_pm_qos_*_global_notifier
* API. The notification chain data is stored in a static variable.
*
* Note about the per-device constraint data struct allocation:
* . The per-device constraints data struct ptr is stored in the device
* dev_pm_info.
* . To minimize the data usage by the per-device constraints, the data struct
* is only allocated at the first call to dev_pm_qos_add_request.
* . The data is later free'd when the device is removed from the system.
* . A global mutex protects the constraints users from the data being
* allocated and free'd.
*/
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>
#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
* __dev_pm_qos_flags - Check PM QoS flags for a given device.
* @dev: Device to check the PM QoS flags for.
* @mask: Flags to check against.
*
* This routine must be called with dev->power.lock held.
*/
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
struct dev_pm_qos *qos = dev->power.qos;
struct pm_qos_flags *pqf;
s32 val;
if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;
pqf = &qos->flags;
if (list_empty(&pqf->list))
return PM_QOS_FLAGS_UNDEFINED;
val = pqf->effective_flags & mask;
if (val)
return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
return PM_QOS_FLAGS_NONE;
}
/**
* dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
* @dev: Device to check the PM QoS flags for.
* @mask: Flags to check against.
*/
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
unsigned long irqflags;
enum pm_qos_flags_status ret;
spin_lock_irqsave(&dev->power.lock, irqflags);
ret = __dev_pm_qos_flags(dev, mask);
spin_unlock_irqrestore(&dev->power.lock, irqflags);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/**
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
* This routine must be called with dev->power.lock held.
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}
/**
* dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
*/
s32 dev_pm_qos_read_value(struct device *dev)
{
unsigned long flags;
s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
ret = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
/**
* apply_constraint - Add/modify/remove device PM QoS request.
* @req: Constraint request to apply
* @action: Action to perform (add/update/remove).
* @value: Value to assign to the QoS request.
*
* Internal function to update the constraints list using the PM QoS core
* code and if needed call the per-device and the global notification
* callbacks
*/
static int apply_constraint(struct dev_pm_qos_request *req,
enum pm_qos_req_action action, s32 value)
{
struct dev_pm_qos *qos = req->dev->power.qos;
int ret;
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = pm_qos_update_target(&qos->resume_latency,
&req->data.pnode, action, value);
if (ret) {
value = pm_qos_read_value(&qos->resume_latency);
blocking_notifier_call_chain(&dev_pm_notifiers,
(unsigned long)value,
req);
}
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
ret = pm_qos_update_target(&qos->latency_tolerance,
&req->data.pnode, action, value);
if (ret) {
value = pm_qos_read_value(&qos->latency_tolerance);
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
break;
default:
ret = -EINVAL;
}
return ret;
}
/*
* dev_pm_qos_constraints_allocate
* @dev: device to allocate data for
*
* Called at the first call to add_request, for constraint data allocation
* Must be called with the dev_pm_qos_mtx mutex held
*/
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
struct dev_pm_qos *qos;
struct pm_qos_constraints *c;
struct blocking_notifier_head *n;
qos = kzalloc(sizeof(*qos), GFP_KERNEL);
if (!qos)
return -ENOMEM;
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n) {
kfree(qos);
return -ENOMEM;
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->resume_latency;
plist_head_init(&c->list);
c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
c = &qos->latency_tolerance;
plist_head_init(&c->list);
c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
dev->power.qos = qos;
spin_unlock_irq(&dev->power.lock);
return 0;
}
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);
/**
* dev_pm_qos_constraints_destroy
* @dev: target device
*
* Called from the device PM subsystem on device removal under device_pm_lock().
*/
void dev_pm_qos_constraints_destroy(struct device *dev)
{
struct dev_pm_qos *qos;
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
mutex_lock(&dev_pm_qos_sysfs_mtx);
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
pm_qos_sysfs_remove_resume_latency(dev);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);
qos = dev->power.qos;
if (!qos)
goto out;
/* Flush the constraints lists for the device. */
c = &qos->resume_latency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
* callbacks if needed
*/
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
c = &qos->latency_tolerance;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
spin_lock_irq(&dev->power.lock);
dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
kfree(qos);
out:
mutex_unlock(&dev_pm_qos_mtx);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
static bool dev_pm_qos_invalid_request(struct device *dev,
struct dev_pm_qos_request *req)
{
return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
&& !dev->power.set_latency_tolerance);
}
static int __dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
int ret = 0;
if (!dev || dev_pm_qos_invalid_request(dev, req))
return -EINVAL;
if (WARN(dev_pm_qos_request_active(req),
"%s() called for already added request\n", __func__))
return -EINVAL;
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
trace_dev_pm_qos_add_request(dev_name(dev), type, value);
if (!ret) {
req->dev = dev;
req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
}
return ret;
}
/**
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
* @req: pointer to a preallocated handle
* @type: type of the request
* @value: defines the qos request
*
* This function inserts a new entry in the device constraints list of
* requested qos performance characteristics. It recomputes the aggregate
* QoS expectations of parameters and initializes the dev_pm_qos_request
* handle. Caller needs to save this handle for later use in updates and
* removal.
*
* Returns 1 if the aggregated constraint value has changed,
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
* to allocate for data structures, -ENODEV if the device has just been removed
* from the system.
*
* Callers should ensure that the target device is not RPM_SUSPENDED before
* using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_add_request(dev, req, type, value);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
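/*
 * Illustrative sketch (not part of the original file): a driver that cannot
 * tolerate long resume latencies for its device might add, tighten and then
 * drop a request as below. The request object and the latency values are
 * made up for the example; the object must stay allocated for as long as
 * the request is active.
 *
 * static struct dev_pm_qos_request my_req;
 *
 * ret = dev_pm_qos_add_request(dev, &my_req,
 * DEV_PM_QOS_RESUME_LATENCY, 100);
 * ...
 * ret = dev_pm_qos_update_request(&my_req, 20);
 * ...
 * ret = dev_pm_qos_remove_request(&my_req);
 */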
/**
* __dev_pm_qos_update_request - Modify an existing device PM QoS request.
* @req : PM QoS request to modify.
* @new_value: New value to request.
*/
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 new_value)
{
s32 curr_value;
int ret = 0;
if (!req) /*guard against callers passing in null */
return -EINVAL;
if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
curr_value = req->data.flr.flags;
break;
default:
return -EINVAL;
}
trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
new_value);
if (curr_value != new_value)
ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
return ret;
}
/**
* dev_pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a dev_pm_qos request to use
* @new_value: defines the qos request
*
* Updates an existing dev PM qos request along with updating the
* target value.
*
* Attempts are made to make this code callable on hot code paths.
*
* Returns 1 if the aggregated constraint value has changed,
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
*
* Callers should ensure that the target device is not RPM_SUSPENDED before
* using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret;
if (!req) /*guard against callers passing in null */
return -EINVAL;
if (WARN(!dev_pm_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
PM_QOS_DEFAULT_VALUE);
ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
return ret;
}
/**
* dev_pm_qos_remove_request - modifies an existing qos request
* @req: handle to request list element
*
* Will remove pm qos request from the list of constraints and
* recompute the current target value. Call this on slow code paths.
*
* Returns 1 if the aggregated constraint value has changed,
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
*
* Callers should ensure that the target device is not RPM_SUSPENDED before
* using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
/**
* dev_pm_qos_add_notifier - sets notification entry for changes to target value
* of per-device PM QoS constraints
*
* @dev: target device for the constraint
* @notifier: notifier block managed by caller.
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device.
*
* If the device's constraints object doesn't exist when this routine is called,
* it will be created (or error code will be returned if that fails).
*/
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR(dev->power.qos))
ret = -ENODEV;
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
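/*
 * Illustrative sketch (not part of the original file): a "watcher" in the
 * sense of the header comment registers a per-device notifier to be told
 * when the aggregated constraint for the device changes. The callback name
 * is hypothetical and its body is left as a stub.
 *
 * static int my_qos_notify(struct notifier_block *nb,
 * unsigned long value, void *data)
 * {
 * return NOTIFY_OK;
 * }
 *
 * static struct notifier_block my_qos_nb = {
 * .notifier_call = my_qos_notify,
 * };
 *
 * dev_pm_qos_add_notifier(dev, &my_qos_nb);
 */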
/**
* dev_pm_qos_remove_notifier - deletes notification for changes to target value
* of per-device PM QoS constraints
*
* @dev: target device for the constraint
* @notifier: notifier block to be removed.
*
* Will remove the notifier from the notification chain that gets called
* upon changes to the target value.
*/
int dev_pm_qos_remove_notifier(struct device *dev,
struct notifier_block *notifier)
{
int retval = 0;
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
mutex_unlock(&dev_pm_qos_mtx);
return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
/**
* dev_pm_qos_add_global_notifier - sets notification entry for changes to
* target value of the PM QoS constraints for any device
*
* @notifier: notifier block managed by caller.
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for any device.
*/
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
/**
* dev_pm_qos_remove_global_notifier - deletes notification for changes to
* target value of PM QoS constraints for any device
*
* @notifier: notifier block to be removed.
*
* Will remove the notifier from the notification chain that gets called
* upon changes to the target value for any device.
*/
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
/**
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
* @dev: Device whose ancestor to add the request for.
* @req: Pointer to the preallocated handle.
* @type: Type of the request.
* @value: Constraint latency value.
*/
int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
struct device *ancestor = dev->parent;
int ret = -ENODEV;
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
while (ancestor && !ancestor->power.ignore_children)
ancestor = ancestor->parent;
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
while (ancestor && !ancestor->power.set_latency_tolerance)
ancestor = ancestor->parent;
break;
default:
ancestor = NULL;
}
if (ancestor)
ret = dev_pm_qos_add_request(ancestor, req, type, value);
if (ret < 0)
req->dev = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
struct dev_pm_qos_request *req = NULL;
switch(type) {
case DEV_PM_QOS_RESUME_LATENCY:
req = dev->power.qos->resume_latency_req;
dev->power.qos->resume_latency_req = NULL;
break;
case DEV_PM_QOS_LATENCY_TOLERANCE:
req = dev->power.qos->latency_tolerance_req;
dev->power.qos->latency_tolerance_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
}
__dev_pm_qos_remove_request(req);
kfree(req);
}
static void dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_drop_user_request(dev, type);
mutex_unlock(&dev_pm_qos_mtx);
}
/**
* dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
* @dev: Device whose PM QoS latency limit is to be exposed to user space.
* @value: Initial value of the latency limit.
*/
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
struct dev_pm_qos_request *req;
int ret;
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
if (ret < 0) {
kfree(req);
return ret;
}
mutex_lock(&dev_pm_qos_sysfs_mtx);
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->resume_latency_req)
ret = -EEXIST;
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
dev->power.qos->resume_latency_req = req;
mutex_unlock(&dev_pm_qos_mtx);
ret = pm_qos_sysfs_add_resume_latency(dev);
if (ret)
dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}
/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
pm_qos_sysfs_remove_resume_latency(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
/**
* dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
* @dev: Device whose PM QoS flags are to be exposed to user space.
* @val: Initial values of the flags.
*/
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
struct dev_pm_qos_request *req;
int ret;
if (!device_is_registered(dev))
return -EINVAL;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
if (ret < 0) {
kfree(req);
return ret;
}
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_sysfs_mtx);
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
else if (dev->power.qos->flags_req)
ret = -EEXIST;
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
dev->power.qos->flags_req = req;
mutex_unlock(&dev_pm_qos_mtx);
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}
/**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space.
*/
void dev_pm_qos_hide_flags(struct device *dev)
{
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_sysfs_mtx);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
/**
* dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
* @dev: Device to update the PM QoS flags request for.
* @mask: Flags to set/clear.
* @set: Whether to set or clear the flags (true means set).
*/
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
s32 value;
int ret;
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
ret = -EINVAL;
goto out;
}
value = dev_pm_qos_requested_flags(dev);
if (set)
value |= mask;
else
value &= ~mask;
ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
/**
* dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
* @dev: Device to obtain the user space latency tolerance for.
*/
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
s32 ret;
mutex_lock(&dev_pm_qos_mtx);
ret = IS_ERR_OR_NULL(dev->power.qos)
|| !dev->power.qos->latency_tolerance_req ?
PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
dev->power.qos->latency_tolerance_req->data.pnode.prio;
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
/**
* dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
* @dev: Device to update the user space latency tolerance for.
* @val: New user space latency tolerance for @dev (negative values disable).
*/
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
int ret;
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos)
|| !dev->power.qos->latency_tolerance_req) {
struct dev_pm_qos_request *req;
if (val < 0) {
ret = -EINVAL;
goto out;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto out;
}
ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
if (ret < 0) {
kfree(req);
goto out;
}
dev->power.qos->latency_tolerance_req = req;
} else {
if (val < 0) {
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
ret = 0;
} else {
ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
}
}
out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
| gpl-2.0 |
shukiz/VAR-SOM-AM33-Kernel-3-14 | arch/powerpc/platforms/85xx/p1010rdb.c | 329 | 1979 | /*
* P1010RDB Board Setup
*
* Copyright 2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "mpc85xx.h"
void __init p1010_rdb_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
}
/*
* Setup the architecture
*/
static void __init p1010_rdb_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("p1010_rdb_setup_arch()", 0);
fsl_pci_assign_primary();
printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
}
machine_arch_initcall(p1010_rdb, mpc85xx_common_publish_devices);
machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
/*
* Called very early, device-tree isn't unflattened
*/
static int __init p1010_rdb_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "fsl,P1010RDB"))
return 1;
if (of_flat_dt_is_compatible(root, "fsl,P1010RDB-PB"))
return 1;
return 0;
}
define_machine(p1010_rdb) {
.name = "P1010 RDB",
.probe = p1010_rdb_probe,
.setup_arch = p1010_rdb_setup_arch,
.init_IRQ = p1010_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
| gpl-2.0 |
whyorean/android_kernel_xiaomi_msm8996 | drivers/mmc/host/au1xmmc.c | 329 | 29853 | /*
* linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
*
* Copyright (c) 2005, Advanced Micro Devices, Inc.
*
* Developed with help from the 2.4.30 MMC AU1XXX controller including
* the following copyright notices:
* Copyright (c) 2003-2004 Embedded Edge, LLC.
* Portions Copyright (C) 2002 Embedix, Inc
* Copyright 2002 Hewlett-Packard Company
* 2.6 version of this driver inspired by:
* (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
* All Rights Reserved.
* (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Why don't we use the SD controllers' carddetect feature?
*
* From the AU1100 MMC application guide:
* If the Au1100-based design is intended to support both MultiMediaCards
* and 1- or 4-data bit SecureDigital cards, then the solution is to
* connect a weak (560KOhm) pull-up resistor to connector pin 1.
* In doing so, a MMC card never enters SPI-mode communications,
* but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
* (the low to high transition will not occur).
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#define DRIVER_NAME "au1xxx-mmc"
/* Set this to enable special debugging macros */
/* #define DEBUG */
#ifdef DEBUG
#define DBG(fmt, idx, args...) \
pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...) do {} while (0)
#endif
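/* Example of the calling convention (mirrors the real uses further down,
 * e.g. in au1xmmc_irq()): the controller id goes first, format args follow.
 *
 *	DBG("unhandled status %8.8x\n", host->pdev->id, status);
 */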
/* Hardware definitions */
#define AU1XMMC_DESCRIPTOR_COUNT 1
/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff
#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
/* This gives us a hard value for the stop command that we can write directly
* to the command register.
*/
#define STOP_CMD \
(SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
/* This is the set of interrupts that we configure by default. */
#define AU1XMMC_INTERRUPTS \
(SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \
SD_CONFIG_CR | SD_CONFIG_I)
/* The poll event (looking for insert/remove events) runs twice a second. */
#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
struct au1xmmc_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
u32 flags;
void __iomem *iobase;
u32 clock;
u32 bus_width;
u32 power_mode;
int status;
struct {
int len;
int dir;
} dma;
struct {
int index;
int offset;
int len;
} pio;
u32 tx_chan;
u32 rx_chan;
int irq;
struct tasklet_struct finish_task;
struct tasklet_struct data_task;
struct au1xmmc_platform_data *platdata;
struct platform_device *pdev;
struct resource *ioarea;
struct clk *clk;
};
/* Status flags used by the host structure */
#define HOST_F_XMIT 0x0001
#define HOST_F_RECV 0x0002
#define HOST_F_DMA 0x0010
#define HOST_F_DBDMA 0x0020
#define HOST_F_ACTIVE 0x0100
#define HOST_F_STOP 0x1000
#define HOST_S_IDLE 0x0001
#define HOST_S_CMD 0x0002
#define HOST_S_DATA 0x0003
#define HOST_S_STOP 0x0004
/* Easy access macros */
#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
#define HOST_CMD(h) ((h)->iobase + SD_CMD)
#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
#define DMA_CHANNEL(h) \
(((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
static inline int has_dbdma(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1200:
case ALCHEMY_CPU_AU1300:
return 1;
default:
return 0;
}
}
static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
u32 val = __raw_readl(HOST_CONFIG(host));
val |= mask;
__raw_writel(val, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
u32 val = __raw_readl(HOST_CONFIG2(host));
__raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
mdelay(1);
/* SEND_STOP will turn off clock control - this re-enables it */
val &= ~SD_CONFIG2_DF;
__raw_writel(val, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
}
static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
u32 val = __raw_readl(HOST_CONFIG(host));
val &= ~mask;
__raw_writel(val, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
static inline void SEND_STOP(struct au1xmmc_host *host)
{
u32 config2;
WARN_ON(host->status != HOST_S_DATA);
host->status = HOST_S_STOP;
config2 = __raw_readl(HOST_CONFIG2(host));
__raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
/* Send the stop command */
__raw_writel(STOP_CMD, HOST_CMD(host));
wmb(); /* drain writebuffer */
}
static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
{
if (host->platdata && host->platdata->set_power)
host->platdata->set_power(host->mmc, state);
}
static int au1xmmc_card_inserted(struct mmc_host *mmc)
{
struct au1xmmc_host *host = mmc_priv(mmc);
if (host->platdata && host->platdata->card_inserted)
return !!host->platdata->card_inserted(host->mmc);
return -ENOSYS;
}
static int au1xmmc_card_readonly(struct mmc_host *mmc)
{
struct au1xmmc_host *host = mmc_priv(mmc);
if (host->platdata && host->platdata->card_readonly)
return !!host->platdata->card_readonly(mmc);
return -ENOSYS;
}
static void au1xmmc_finish_request(struct au1xmmc_host *host)
{
struct mmc_request *mrq = host->mrq;
host->mrq = NULL;
host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
host->dma.len = 0;
host->dma.dir = 0;
host->pio.index = 0;
host->pio.offset = 0;
host->pio.len = 0;
host->status = HOST_S_IDLE;
mmc_request_done(host->mmc, mrq);
}
static void au1xmmc_tasklet_finish(unsigned long param)
{
struct au1xmmc_host *host = (struct au1xmmc_host *) param;
au1xmmc_finish_request(host);
}
static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
struct mmc_command *cmd, struct mmc_data *data)
{
u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
break;
case MMC_RSP_R1:
mmccmd |= SD_CMD_RT_1;
break;
case MMC_RSP_R1B:
mmccmd |= SD_CMD_RT_1B;
break;
case MMC_RSP_R2:
mmccmd |= SD_CMD_RT_2;
break;
case MMC_RSP_R3:
mmccmd |= SD_CMD_RT_3;
break;
default:
pr_info("au1xmmc: unhandled response type %02x\n",
mmc_resp_type(cmd));
return -EINVAL;
}
if (data) {
if (data->flags & MMC_DATA_READ) {
if (data->blocks > 1)
mmccmd |= SD_CMD_CT_4;
else
mmccmd |= SD_CMD_CT_2;
} else if (data->flags & MMC_DATA_WRITE) {
if (data->blocks > 1)
mmccmd |= SD_CMD_CT_3;
else
mmccmd |= SD_CMD_CT_1;
}
}
__raw_writel(cmd->arg, HOST_CMDARG(host));
wmb(); /* drain writebuffer */
if (wait)
IRQ_OFF(host, SD_CONFIG_CR);
__raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
wmb(); /* drain writebuffer */
/* Wait for the command to go on the line */
while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
/* nop */;
/* Wait for the command to come back */
if (wait) {
u32 status = __raw_readl(HOST_STATUS(host));
while (!(status & SD_STATUS_CR))
status = __raw_readl(HOST_STATUS(host));
/* Clear the CR status */
__raw_writel(SD_STATUS_CR, HOST_STATUS(host));
IRQ_ON(host, SD_CONFIG_CR);
}
return 0;
}
static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{
struct mmc_request *mrq = host->mrq;
struct mmc_data *data;
u32 crc;
WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
if (host->mrq == NULL)
return;
data = mrq->cmd->data;
if (status == 0)
status = __raw_readl(HOST_STATUS(host));
/* The transaction is really over when the SD_STATUS_DB bit is clear */
while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
status = __raw_readl(HOST_STATUS(host));
data->error = 0;
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
/* Process any errors */
crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
if (host->flags & HOST_F_XMIT)
crc |= ((status & 0x07) == 0x02) ? 0 : 1;
if (crc)
data->error = -EILSEQ;
/* Clear the CRC bits */
__raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
data->bytes_xfered = 0;
if (!data->error) {
if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
u32 chan = DMA_CHANNEL(host);
chan_tab_t *c = *((chan_tab_t **)chan);
au1x_dma_chan_t *cp = c->chan_ptr;
data->bytes_xfered = cp->ddma_bytecnt;
} else
data->bytes_xfered =
(data->blocks * data->blksz) - host->pio.len;
}
au1xmmc_finish_request(host);
}
static void au1xmmc_tasklet_data(unsigned long param)
{
struct au1xmmc_host *host = (struct au1xmmc_host *)param;
u32 status = __raw_readl(HOST_STATUS(host));
au1xmmc_data_complete(host, status);
}
#define AU1XMMC_MAX_TRANSFER 8
static void au1xmmc_send_pio(struct au1xmmc_host *host)
{
struct mmc_data *data;
int sg_len, max, count;
unsigned char *sg_ptr, val;
u32 status;
struct scatterlist *sg;
data = host->mrq->data;
if (!(host->flags & HOST_F_XMIT))
return;
/* This is the pointer to the data buffer */
sg = &data->sg[host->pio.index];
sg_ptr = sg_virt(sg) + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = data->sg[host->pio.index].length - host->pio.offset;
/* Check if we need less than the size of the sg_buffer */
max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
if (max > AU1XMMC_MAX_TRANSFER)
max = AU1XMMC_MAX_TRANSFER;
for (count = 0; count < max; count++) {
status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_TH))
break;
val = *sg_ptr++;
__raw_writel((unsigned long)val, HOST_TXPORT(host));
wmb(); /* drain writebuffer */
}
host->pio.len -= count;
host->pio.offset += count;
if (count == sg_len) {
host->pio.index++;
host->pio.offset = 0;
}
if (host->pio.len == 0) {
IRQ_OFF(host, SD_CONFIG_TH);
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
tasklet_schedule(&host->data_task);
}
}
static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
struct mmc_data *data;
int max, count, sg_len = 0;
unsigned char *sg_ptr = NULL;
u32 status, val;
struct scatterlist *sg;
data = host->mrq->data;
if (!(host->flags & HOST_F_RECV))
return;
max = host->pio.len;
if (host->pio.index < host->dma.len) {
sg = &data->sg[host->pio.index];
sg_ptr = sg_virt(sg) + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
/* Check if we need less than the size of the sg_buffer */
if (sg_len < max)
max = sg_len;
}
if (max > AU1XMMC_MAX_TRANSFER)
max = AU1XMMC_MAX_TRANSFER;
for (count = 0; count < max; count++) {
status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_NE))
break;
if (status & SD_STATUS_RC) {
DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
host->pio.len, count);
break;
}
if (status & SD_STATUS_RO) {
DBG("RX Overrun [%d + %d]\n", host->pdev->id,
host->pio.len, count);
break;
}
else if (status & SD_STATUS_RU) {
DBG("RX Underrun [%d + %d]\n", host->pdev->id,
host->pio.len, count);
break;
}
val = __raw_readl(HOST_RXPORT(host));
if (sg_ptr)
*sg_ptr++ = (unsigned char)(val & 0xFF);
}
host->pio.len -= count;
host->pio.offset += count;
if (sg_len && count == sg_len) {
host->pio.index++;
host->pio.offset = 0;
}
if (host->pio.len == 0) {
/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
IRQ_OFF(host, SD_CONFIG_NE);
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
tasklet_schedule(&host->data_task);
}
}
/* This is called when a command has been completed - grab the response
* and check for errors. Then start the data transfer if it is indicated.
*/
static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{
struct mmc_request *mrq = host->mrq;
struct mmc_command *cmd;
u32 r[4];
int i, trans;
if (!host->mrq)
return;
cmd = mrq->cmd;
cmd->error = 0;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
r[0] = __raw_readl(host->iobase + SD_RESP3);
r[1] = __raw_readl(host->iobase + SD_RESP2);
r[2] = __raw_readl(host->iobase + SD_RESP1);
r[3] = __raw_readl(host->iobase + SD_RESP0);
/* The CRC is omitted from the response, so really
* we only got 120 bits, but the engine expects
* 128 bits, so we have to shift things up.
*/
for (i = 0; i < 4; i++) {
cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
if (i != 3)
cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
}
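/* Worked example with hypothetical raw words: r[0] = 0x00AABBCC,
 * r[1] = 0xDDEEFF00 -> resp[0] = 0xAABBCCDD, i.e. each response word
 * picks up the top byte of the following raw word.
 */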
} else {
/* Technically, we should be getting all 48 bits of
* the response (SD_RESP1 + SD_RESP2), but because
* our response omits the CRC, our data ends up
* being shifted 8 bits to the right. In this case,
* that means that the OSR data starts at bit 31,
* so we can just read RESP0 and return that.
*/
cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
}
}
/* Figure out errors */
if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
cmd->error = -EILSEQ;
trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
if (!trans || cmd->error) {
IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
tasklet_schedule(&host->finish_task);
return;
}
host->status = HOST_S_DATA;
if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
u32 channel = DMA_CHANNEL(host);
/* Start the DBDMA as soon as the buffer gets something in it */
if (host->flags & HOST_F_RECV) {
u32 mask = SD_STATUS_DB | SD_STATUS_NE;
while((status & mask) != mask)
status = __raw_readl(HOST_STATUS(host));
}
au1xxx_dbdma_start(channel);
}
}
static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{
unsigned int pbus = clk_get_rate(host->clk);
unsigned int divisor = ((pbus / rate) / 2) - 1;
u32 config;
config = __raw_readl(HOST_CONFIG(host));
config &= ~(SD_CONFIG_DIV);
config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
__raw_writel(config, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
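/*
 * Worked example (illustrative numbers, not taken from a datasheet): with a
 * 96 MHz peripheral bus clock and a requested rate of 24 MHz the calculation
 * above gives divisor = ((96000000 / 24000000) / 2) - 1 = 1, which by the
 * inverse of that calculation corresponds to an SD clock of roughly
 * pbus / (2 * (divisor + 1)) = 24 MHz.
 */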
static int au1xmmc_prepare_data(struct au1xmmc_host *host,
struct mmc_data *data)
{
int datalen = data->blocks * data->blksz;
if (data->flags & MMC_DATA_READ)
host->flags |= HOST_F_RECV;
else
host->flags |= HOST_F_XMIT;
if (host->mrq->stop)
host->flags |= HOST_F_STOP;
host->dma.dir = DMA_BIDIRECTIONAL;
host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma.dir);
if (host->dma.len == 0)
return -ETIMEDOUT;
__raw_writel(data->blksz - 1, HOST_BLKSIZE(host));
if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
int i;
u32 channel = DMA_CHANNEL(host);
au1xxx_dbdma_stop(channel);
for (i = 0; i < host->dma.len; i++) {
u32 ret = 0, flags = DDMA_FLAGS_NOIE;
struct scatterlist *sg = &data->sg[i];
int sg_len = sg->length;
int len = (datalen > sg_len) ? sg_len : datalen;
if (i == host->dma.len - 1)
flags = DDMA_FLAGS_IE;
if (host->flags & HOST_F_XMIT) {
ret = au1xxx_dbdma_put_source(channel,
sg_phys(sg), len, flags);
} else {
ret = au1xxx_dbdma_put_dest(channel,
sg_phys(sg), len, flags);
}
if (!ret)
goto dataerr;
datalen -= len;
}
} else {
host->pio.index = 0;
host->pio.offset = 0;
host->pio.len = datalen;
if (host->flags & HOST_F_XMIT)
IRQ_ON(host, SD_CONFIG_TH);
else
IRQ_ON(host, SD_CONFIG_NE);
/* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
}
return 0;
dataerr:
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
host->dma.dir);
return -ETIMEDOUT;
}
/* This actually starts a command or data transaction */
static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
{
struct au1xmmc_host *host = mmc_priv(mmc);
int ret = 0;
WARN_ON(irqs_disabled());
WARN_ON(host->status != HOST_S_IDLE);
host->mrq = mrq;
host->status = HOST_S_CMD;
/* fail request immediately if no card is present */
if (0 == au1xmmc_card_inserted(mmc)) {
mrq->cmd->error = -ENOMEDIUM;
au1xmmc_finish_request(host);
return;
}
if (mrq->data) {
FLUSH_FIFO(host);
ret = au1xmmc_prepare_data(host, mrq->data);
}
if (!ret)
ret = au1xmmc_send_command(host, 0, mrq->cmd, mrq->data);
if (ret) {
mrq->cmd->error = ret;
au1xmmc_finish_request(host);
}
}
static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{
/* Apply the clock */
__raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
wmb(); /* drain writebuffer */
mdelay(1);
__raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
wmb(); /* drain writebuffer */
mdelay(5);
__raw_writel(~0, HOST_STATUS(host));
wmb(); /* drain writebuffer */
__raw_writel(0, HOST_BLKSIZE(host));
__raw_writel(0x001fffff, HOST_TIMEOUT(host));
wmb(); /* drain writebuffer */
__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
__raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
mdelay(1);
__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
/* Configure interrupts */
__raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct au1xmmc_host *host = mmc_priv(mmc);
u32 config2;
if (ios->power_mode == MMC_POWER_OFF)
au1xmmc_set_power(host, 0);
else if (ios->power_mode == MMC_POWER_ON) {
au1xmmc_set_power(host, 1);
}
if (ios->clock && ios->clock != host->clock) {
au1xmmc_set_clock(host, ios->clock);
host->clock = ios->clock;
}
config2 = __raw_readl(HOST_CONFIG2(host));
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
config2 |= SD_CONFIG2_BB;
break;
case MMC_BUS_WIDTH_4:
config2 &= ~SD_CONFIG2_BB;
config2 |= SD_CONFIG2_WB;
break;
case MMC_BUS_WIDTH_1:
config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
break;
}
__raw_writel(config2, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
}
#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
#define STATUS_DATA_IN (SD_STATUS_NE)
#define STATUS_DATA_OUT (SD_STATUS_TH)
static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
{
struct au1xmmc_host *host = dev_id;
u32 status;
status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_I))
return IRQ_NONE; /* not ours */
if (status & SD_STATUS_SI) /* SDIO */
mmc_signal_sdio_irq(host->mmc);
if (host->mrq && (status & STATUS_TIMEOUT)) {
if (status & SD_STATUS_RAT)
host->mrq->cmd->error = -ETIMEDOUT;
else if (status & SD_STATUS_DT)
host->mrq->data->error = -ETIMEDOUT;
/* In PIO mode, interrupts might still be enabled */
IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
tasklet_schedule(&host->finish_task);
}
#if 0
else if (status & SD_STATUS_DD) {
/* Sometimes we get a DD before a NE in PIO mode */
if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
au1xmmc_receive_pio(host);
else {
au1xmmc_data_complete(host, status);
/* tasklet_schedule(&host->data_task); */
}
}
#endif
else if (status & SD_STATUS_CR) {
if (host->status == HOST_S_CMD)
au1xmmc_cmd_complete(host, status);
} else if (!(host->flags & HOST_F_DMA)) {
if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
au1xmmc_send_pio(host);
else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
au1xmmc_receive_pio(host);
} else if (status & 0x203F3C70) {
DBG("Unhandled status %8.8x\n", host->pdev->id,
status);
}
__raw_writel(status, HOST_STATUS(host));
wmb(); /* drain writebuffer */
return IRQ_HANDLED;
}
/* 8bit memory DMA device */
static dbdev_tab_t au1xmmc_mem_dbdev = {
.dev_id = DSCR_CMD0_ALWAYS,
.dev_flags = DEV_FLAGS_ANYUSE,
.dev_tsize = 0,
.dev_devwidth = 8,
.dev_physaddr = 0x00000000,
.dev_intlevel = 0,
.dev_intpolarity = 0,
};
static int memid;
static void au1xmmc_dbdma_callback(int irq, void *dev_id)
{
struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
/* Avoid spurious interrupts */
if (!host->mrq)
return;
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
tasklet_schedule(&host->data_task);
}
static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
{
struct resource *res;
int txid, rxid;
res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
if (!res)
return -ENODEV;
txid = res->start;
res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
if (!res)
return -ENODEV;
rxid = res->start;
if (!memid)
return -ENODEV;
host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
au1xmmc_dbdma_callback, (void *)host);
if (!host->tx_chan) {
dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
return -ENODEV;
}
host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
au1xmmc_dbdma_callback, (void *)host);
if (!host->rx_chan) {
dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
au1xxx_dbdma_chan_free(host->tx_chan);
return -ENODEV;
}
au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
/* DBDMA is good to go */
host->flags |= HOST_F_DMA | HOST_F_DBDMA;
return 0;
}
static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
{
if (host->flags & HOST_F_DMA) {
host->flags &= ~HOST_F_DMA;
au1xxx_dbdma_chan_free(host->tx_chan);
au1xxx_dbdma_chan_free(host->rx_chan);
}
}
static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
{
struct au1xmmc_host *host = mmc_priv(mmc);
if (en)
IRQ_ON(host, SD_CONFIG_SI);
else
IRQ_OFF(host, SD_CONFIG_SI);
}
static const struct mmc_host_ops au1xmmc_ops = {
.request = au1xmmc_request,
.set_ios = au1xmmc_set_ios,
.get_ro = au1xmmc_card_readonly,
.get_cd = au1xmmc_card_inserted,
.enable_sdio_irq = au1xmmc_enable_sdio_irq,
};
static int au1xmmc_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct au1xmmc_host *host;
struct resource *r;
int ret, iflag;
mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
if (!mmc) {
dev_err(&pdev->dev, "no memory for mmc_host\n");
ret = -ENOMEM;
goto out0;
}
host = mmc_priv(mmc);
host->mmc = mmc;
host->platdata = pdev->dev.platform_data;
host->pdev = pdev;
ret = -ENODEV;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no mmio defined\n");
goto out1;
}
host->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!host->ioarea) {
dev_err(&pdev->dev, "mmio already in use\n");
goto out1;
}
host->iobase = ioremap(r->start, 0x3c);
if (!host->iobase) {
dev_err(&pdev->dev, "cannot remap mmio\n");
goto out2;
}
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!r) {
dev_err(&pdev->dev, "no IRQ defined\n");
goto out3;
}
host->irq = r->start;
mmc->ops = &au1xmmc_ops;
mmc->f_min = 450000;
mmc->f_max = 24000000;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 512;
mmc->ocr_avail = AU1XMMC_OCR;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1100:
mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
break;
case ALCHEMY_CPU_AU1200:
mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
break;
case ALCHEMY_CPU_AU1300:
iflag = 0; /* nothing is shared */
mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
mmc->f_max = 52000000;
if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
mmc->caps |= MMC_CAP_8_BIT_DATA;
break;
}
ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
if (ret) {
dev_err(&pdev->dev, "cannot grab IRQ\n");
goto out3;
}
host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "cannot find clock\n");
ret = PTR_ERR(host->clk);
goto out_irq;
}
ret = clk_prepare_enable(host->clk);
if (ret) {
dev_err(&pdev->dev, "cannot enable clock\n");
goto out_clk;
}
host->status = HOST_S_IDLE;
/* board-specific carddetect setup, if any */
if (host->platdata && host->platdata->cd_setup) {
ret = host->platdata->cd_setup(mmc, 1);
if (ret) {
dev_warn(&pdev->dev, "board CD setup failed\n");
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
} else
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* platform may not be able to use all advertised caps */
if (host->platdata)
mmc->caps &= ~(host->platdata->mask_host_caps);
tasklet_init(&host->data_task, au1xmmc_tasklet_data,
(unsigned long)host);
tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
(unsigned long)host);
if (has_dbdma()) {
ret = au1xmmc_dbdma_init(host);
if (ret)
pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n");
}
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led) {
struct led_classdev *led = host->platdata->led;
led->name = mmc_hostname(mmc);
led->brightness = LED_OFF;
led->default_trigger = mmc_hostname(mmc);
ret = led_classdev_register(mmc_dev(mmc), led);
if (ret)
goto out5;
}
#endif
au1xmmc_reset_controller(host);
ret = mmc_add_host(mmc);
if (ret) {
dev_err(&pdev->dev, "cannot add mmc host\n");
goto out6;
}
platform_set_drvdata(pdev, host);
pr_info(DRIVER_NAME ": MMC Controller %d set up at %p"
" (mode=%s)\n", pdev->id, host->iobase,
host->flags & HOST_F_DMA ? "dma" : "pio");
return 0; /* all ok */
out6:
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led)
led_classdev_unregister(host->platdata->led);
out5:
#endif
__raw_writel(0, HOST_ENABLE(host));
__raw_writel(0, HOST_CONFIG(host));
__raw_writel(0, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
tasklet_kill(&host->data_task);
tasklet_kill(&host->finish_task);
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
out_clk:
clk_disable_unprepare(host->clk);
clk_put(host->clk);
out_irq:
free_irq(host->irq, host);
out3:
iounmap((void *)host->iobase);
out2:
release_resource(host->ioarea);
kfree(host->ioarea);
out1:
mmc_free_host(mmc);
out0:
return ret;
}
static int au1xmmc_remove(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
if (host) {
mmc_remove_host(host->mmc);
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led)
led_classdev_unregister(host->platdata->led);
#endif
if (host->platdata && host->platdata->cd_setup &&
!(host->mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(host->mmc, 0);
__raw_writel(0, HOST_ENABLE(host));
__raw_writel(0, HOST_CONFIG(host));
__raw_writel(0, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
tasklet_kill(&host->data_task);
tasklet_kill(&host->finish_task);
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
au1xmmc_set_power(host, 0);
clk_disable_unprepare(host->clk);
clk_put(host->clk);
free_irq(host->irq, host);
iounmap((void *)host->iobase);
release_resource(host->ioarea);
kfree(host->ioarea);
mmc_free_host(host->mmc);
}
return 0;
}
#ifdef CONFIG_PM
static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
__raw_writel(0, HOST_CONFIG2(host));
__raw_writel(0, HOST_CONFIG(host));
__raw_writel(0xffffffff, HOST_STATUS(host));
__raw_writel(0, HOST_ENABLE(host));
wmb(); /* drain writebuffer */
return 0;
}
static int au1xmmc_resume(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
au1xmmc_reset_controller(host);
return 0;
}
#else
#define au1xmmc_suspend NULL
#define au1xmmc_resume NULL
#endif
static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
.remove = au1xmmc_remove,
.suspend = au1xmmc_suspend,
.resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
static int __init au1xmmc_init(void)
{
if (has_dbdma()) {
/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
* of 8 bits. And since devices are shared, we need to create
* our own to avoid freaking out other devices.
*/
memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
if (!memid)
pr_err("au1xmmc: cannot add memory dbdma\n");
}
return platform_driver_register(&au1xmmc_driver);
}
static void __exit au1xmmc_exit(void)
{
if (has_dbdma() && memid)
au1xxx_ddma_del_device(memid);
platform_driver_unregister(&au1xmmc_driver);
}
module_init(au1xmmc_init);
module_exit(au1xmmc_exit);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:au1xxx-mmc");
| gpl-2.0 |
giveen/kernel_dell_streak7 | drivers/char/hvcs.c | 1097 | 46772 | /*
* IBM eServer Hypervisor Virtual Console Server Device Driver
* Copyright (C) 2003, 2004 IBM Corp.
* Ryan S. Arnold (rsa@us.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
*
* This is the device driver for the IBM Hypervisor Virtual Console Server,
* "hvcs". The IBM hvcs provides a tty driver interface to allow Linux
* user space applications access to the system consoles of logically
* partitioned operating systems, e.g. Linux, running on the same partitioned
* Power5 ppc64 system. Physical hardware consoles per partition are not
* practical on this hardware so system consoles are accessed by this driver
* using inter-partition firmware interfaces to virtual terminal devices.
*
* A vty is known to the HMC as a "virtual serial server adapter". It is a
* virtual terminal device that is created by firmware upon partition creation
* to act as a partitioned OS's console device.
*
* Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64
* Linux system upon their creation by the HMC or their exposure during boot.
* The non-user interactive backend of this driver is implemented as a vio
* device driver so that it can receive notification of vty-server lifetimes
* after it registers with the vio bus to handle vty-server probe and remove
* callbacks.
*
* Many vty-servers can be configured to connect to one vty, but a vty can
* only be actively connected to by a single vty-server, in any manner, at one
* time. If the HMC is currently hosting the console for a target Linux
* partition; attempts to open the tty device to the partition's console using
* the hvcs on any partition will return -EBUSY with every open attempt until
* the HMC frees the connection between its vty-server and the desired
* partition's vty device. Conversely, a vty-server may only be connected to
* a single vty at one time even though it may have several configured vty
* partner possibilities.
*
* Firmware does not provide notification of vty partner changes to this
* driver. This means that an HMC Super Admin may add or remove partner vtys
* from a vty-server's partner list but the changes will not be signaled to
* the vty-server. Firmware only notifies the driver when a vty-server is
* added or removed from the system. To compensate for this deficiency, this
* driver implements a sysfs update attribute which provides a method for
* rescanning partner information upon a user's request.
*
* Each vty-server, prior to being exposed to this driver is reference counted
* using the 2.6 Linux kernel kref construct.
*
* For direction on installation and usage of this driver please reference
* Documentation/powerpc/hvcs.txt.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvconsole.h>
#include <asm/hvcserver.h>
#include <asm/uaccess.h>
#include <asm/vio.h>
/*
* 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
* Removed braces around single statements following conditionals. Removed '=
* 0' after static int declarations since these default to zero. Removed
* list_for_each_safe() and replaced with list_for_each_entry() in
* hvcs_get_by_index(). The 'safe' version is un-needed now that the driver is
* using spinlocks. Changed spin_lock_irqsave() to spin_lock() when locking
* hvcs_structs_lock and hvcs_pi_lock since these are not touched in an int
* handler. Initialized hvcs_structs_lock and hvcs_pi_lock to
* SPIN_LOCK_UNLOCKED at declaration time rather than in hvcs_module_init().
* Added spin_lock around list_del() in destroy_hvcs_struct() to protect the
* list traversals from a deletion. Removed '= NULL' from pointer declaration
* statements since they are initialized NULL by default. Removed wmb()
* instances from hvcs_try_write(). They probably aren't needed with locking in
* place. Added check and cleanup for hvcs_pi_buff = kmalloc() in
* hvcs_module_init(). Exposed hvcs_struct.index via a sysfs attribute so that
* the coupling between /dev/hvcs* and a vty-server can be automatically
* determined. Moved kobject_put() in hvcs_open outside of the
* spin_unlock_irqrestore().
*
* 1.3.1 -> 1.3.2 Changed method for determining hvcs_struct->index and had it
* align with how the tty layer always assigns the lowest index available. This
* change resulted in a list of ints that denotes which indexes are available.
* Device additions and removals use the new hvcs_get_index() and
* hvcs_return_index() helper functions. The list is created with
* hvcs_alloc_index_list() and it is destroyed with hvcs_free_index_list().
* Without these fixes hotplug vty-server adapter support goes crazy with this
* driver if the user removes a vty-server adapter. Moved free_irq() outside of
* the hvcs_final_close() function in order to get it out of the spinlock.
* Rearranged hvcs_close(). Cleaned up some printks and did some housekeeping
* on the changelog. Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from
* arch/powerpc/include/asm/hvcserver.h
*
* 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to
* prevent possible lockup with realtime scheduling as similarly pointed out by
* akpm in hvc_console. Changed resulted in the removal of hvcs_final_close()
* to reorder cleanup operations and prevent discarding of pending data during
* an hvcs_close(). Removed spinlock protection of hvcs_struct data members in
* hvcs_write_room() and hvcs_chars_in_buffer() because they aren't needed.
*/
#define HVCS_DRIVER_VERSION "1.3.3"
MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HVCS_DRIVER_VERSION);
/*
* Wait this long per iteration while trying to push buffered data to the
* hypervisor before allowing the tty to complete a close operation.
*/
#define HVCS_CLOSE_WAIT (HZ/100) /* 1/100 of a second */
/*
* Since the Linux TTY code does not currently (2-04-2004) support dynamic
* addition of tty derived devices and we shouldn't allocate thousands of
* tty_device pointers when the number of vty-server & vty partner connections
* will most often be much lower than this, we'll arbitrarily allocate
* HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we
* register the tty_driver. This can be overridden using an insmod parameter.
*/
#define HVCS_DEFAULT_SERVER_ADAPTERS 64
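/*
 * Illustrative override (see the hvcs_parm_num_devs module parameter below):
 *	modprobe hvcs hvcs_parm_num_devs=128
 */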
/*
* The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device
* nodes as a sanity check. Theoretically there can be over 1 Billion
* vty-server & vty partner connections.
*/
#define HVCS_MAX_SERVER_ADAPTERS 1024
/*
* We let Linux assign us a major number and we start the minors at zero. There
* is no intuitive mapping between minor number and the target vty-server
* adapter except that each new vty-server adapter is always assigned to the
* smallest minor number available.
*/
#define HVCS_MINOR_START 0
/*
* The hcall interface involves putting 8 chars into each of two registers.
* We load up those 2 registers (in arch/powerpc/platforms/pseries/hvconsole.c)
* by casting char[16] to long[2]. It would work without __ALIGNED__, but a
* little (tiny) bit slower because an unaligned load is slower than aligned
* load.
*/
#define __ALIGNED__ __attribute__((__aligned__(8)))
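/*
 * Rough sketch of the packing described above (illustrative only; the real
 * code lives in arch/powerpc/platforms/pseries/hvconsole.c):
 *
 *	char buf[16] __ALIGNED__;
 *	unsigned long *lbuf = (unsigned long *)buf;
 *	(lbuf[0] and lbuf[1] are then loaded into the two hcall registers)
 */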
/*
* How much data can firmware send with each hvc_put_chars()? Maybe this
* should be moved into an architecture specific area.
*/
#define HVCS_BUFF_LEN 16
/*
* This is the maximum amount of data we'll let the user send us (hvcs_write) at
* once in a chunk as a sanity check.
*/
#define HVCS_MAX_FROM_USER 4096
/*
* Be careful when adding flags to this line discipline. Don't add anything
* that will cause echoing or we'll go into a recursive loop echoing chars back
* and forth with the console drivers.
*/
static struct ktermios hvcs_tty_termios = {
.c_iflag = IGNBRK | IGNPAR,
.c_oflag = OPOST,
.c_cflag = B38400 | CS8 | CREAD | HUPCL,
.c_cc = INIT_C_CC,
.c_ispeed = 38400,
.c_ospeed = 38400
};
/*
* This value is used to take the place of a command line parameter when the
* module is inserted. It starts as -1 and stays as such if the user doesn't
* specify a module insmod parameter. If they DO specify one then it is set to
* the value of the integer passed in.
*/
static int hvcs_parm_num_devs = -1;
module_param(hvcs_parm_num_devs, int, 0);
static const char hvcs_driver_name[] = "hvcs";
static const char hvcs_device_node[] = "hvcs";
static const char hvcs_driver_string[]
= "IBM hvcs (Hypervisor Virtual Console Server) Driver";
/* Status of partner info rescan triggered via sysfs. */
static int hvcs_rescan_status;
static struct tty_driver *hvcs_tty_driver;
/*
* In order to be somewhat sane this driver always associates the hvcs_struct
* index element with the numerically equal tty->index. This means that a
* hotplugged vty-server adapter will always map to the lowest index valued
* device node. If vty-servers were hotplug removed from the system and then
* new ones added the new vty-server may have the largest slot number of all
* the vty-server adapters in the partition but it may have the lowest dev node
* index of all the adapters due to the hole left by the hotplug removed
* adapter. There are a set of functions provided to get the lowest index for
* a new device as well as return the index to the list. This list is allocated
* with a number of elements equal to the number of device nodes requested when
* the module was inserted.
*/
static int *hvcs_index_list;
/*
* How large is the list? This is kept for traversal since the list is
* dynamically created.
*/
static int hvcs_index_count;
/*
* Used by the khvcsd to pick up I/O operations when the kernel_thread is
* already awake but potentially shifted to TASK_INTERRUPTIBLE state.
*/
static int hvcs_kicked;
/*
* Used by the kthread construct for task operations like waking the sleeping
* thread and stopping the kthread.
*/
static struct task_struct *hvcs_task;
/*
* We allocate this for the use of all of the hvcs_structs when they fetch
* partner info.
*/
static unsigned long *hvcs_pi_buff;
/* Only allow one hvcs_struct to use the hvcs_pi_buff at a time. */
static DEFINE_SPINLOCK(hvcs_pi_lock);
/* One vty-server per hvcs_struct */
struct hvcs_struct {
spinlock_t lock;
/*
* This index identifies this hvcs device as the complement to a
* specific tty index.
*/
unsigned int index;
struct tty_struct *tty;
int open_count;
/*
* Used to tell the driver kernel_thread what operations need to take
* place upon this hvcs_struct instance.
*/
int todo_mask;
/*
* This buffer is required so that when hvcs_write_room() reports that
* it can send HVCS_BUFF_LEN characters that it will buffer the full
* HVCS_BUFF_LEN characters if need be. This is essential for opost
* writes since they do not do high level buffering and expect to be
* able to send what the driver commits to sending buffering
* [e.g. tab to space conversions in n_tty.c opost()].
*/
char buffer[HVCS_BUFF_LEN];
int chars_in_buffer;
/*
* Any variable below the kref is valid before a tty is connected and
* stays valid after the tty is disconnected. These shouldn't be
* whacked until the kobject refcount reaches zero though some entries
* may be changed via sysfs initiatives.
*/
struct kref kref; /* ref count & hvcs_struct lifetime */
int connected; /* is the vty-server currently connected to a vty? */
uint32_t p_unit_address; /* partner unit address */
uint32_t p_partition_ID; /* partner partition ID */
char p_location_code[HVCS_CLC_LENGTH + 1]; /* CLC + Null Term */
struct list_head next; /* list management */
struct vio_dev *vdev;
};
/* Required to back map a kref to its containing object */
#define from_kref(k) container_of(k, struct hvcs_struct, kref)
static LIST_HEAD(hvcs_structs);
static DEFINE_SPINLOCK(hvcs_structs_lock);
static void hvcs_unthrottle(struct tty_struct *tty);
static void hvcs_throttle(struct tty_struct *tty);
static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance);
static int hvcs_write(struct tty_struct *tty,
const unsigned char *buf, int count);
static int hvcs_write_room(struct tty_struct *tty);
static int hvcs_chars_in_buffer(struct tty_struct *tty);
static int hvcs_has_pi(struct hvcs_struct *hvcsd);
static void hvcs_set_pi(struct hvcs_partner_info *pi,
struct hvcs_struct *hvcsd);
static int hvcs_get_pi(struct hvcs_struct *hvcsd);
static int hvcs_rescan_devices_list(void);
static int hvcs_partner_connect(struct hvcs_struct *hvcsd);
static void hvcs_partner_free(struct hvcs_struct *hvcsd);
static int hvcs_enable_device(struct hvcs_struct *hvcsd,
uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
static int hvcs_open(struct tty_struct *tty, struct file *filp);
static void hvcs_close(struct tty_struct *tty, struct file *filp);
static void hvcs_hangup(struct tty_struct * tty);
static int __devinit hvcs_probe(struct vio_dev *dev,
const struct vio_device_id *id);
static int __devexit hvcs_remove(struct vio_dev *dev);
static int __init hvcs_module_init(void);
static void __exit hvcs_module_exit(void);
#define HVCS_SCHED_READ 0x00000001
#define HVCS_QUICK_READ 0x00000002
#define HVCS_TRY_WRITE 0x00000004
#define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ)
static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
{
return dev_get_drvdata(&viod->dev);
}
/* The sysfs interface for the driver and devices */
static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
size_t count)
{
/*
* Don't need this feature at the present time because firmware doesn't
* yet support multiple partners.
*/
printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
return -EPERM;
}
static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(current_vty,
S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
/* writing a '0' to this sysfs entry will result in the disconnect. */
if (simple_strtol(buf, NULL, 0) != 0)
return -EINVAL;
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->open_count > 0) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
printk(KERN_INFO "HVCS: vterm state unchanged. "
"The hvcs device node is still in use.\n");
return -EPERM;
}
if (hvcsd->connected == 0) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
printk(KERN_INFO "HVCS: vterm state unchanged. The"
" vty-server is not connected to a vty.\n");
return -EPERM;
}
hvcs_partner_free(hvcsd);
printk(KERN_INFO "HVCS: Closed vty-server@%X and"
" partner vty@%X:%d connection.\n",
hvcsd->vdev->unit_address,
hvcsd->p_unit_address,
(uint32_t)hvcsd->p_partition_ID);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return count;
}
static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%d\n", hvcsd->connected);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
hvcs_vterm_state_show, hvcs_vterm_state_store);
static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct vio_dev *viod = to_vio_dev(dev);
struct hvcs_struct *hvcsd = from_vio_dev(viod);
unsigned long flags;
int retval;
spin_lock_irqsave(&hvcsd->lock, flags);
retval = sprintf(buf, "%d\n", hvcsd->index);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return retval;
}
static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
static struct attribute *hvcs_attrs[] = {
&dev_attr_partner_vtys.attr,
&dev_attr_partner_clcs.attr,
&dev_attr_current_vty.attr,
&dev_attr_vterm_state.attr,
&dev_attr_index.attr,
NULL,
};
static struct attribute_group hvcs_attr_group = {
.attrs = hvcs_attrs,
};
static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
{
/* A 1 means it is updating, a 0 means it is done updating */
return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
}
static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
size_t count)
{
if ((simple_strtol(buf, NULL, 0) != 1)
&& (hvcs_rescan_status != 0))
return -EINVAL;
hvcs_rescan_status = 1;
printk(KERN_INFO "HVCS: rescanning partner info for all"
" vty-servers.\n");
hvcs_rescan_devices_list();
hvcs_rescan_status = 0;
return count;
}
static DRIVER_ATTR(rescan,
S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
static void hvcs_kick(void)
{
hvcs_kicked = 1;
wmb();
wake_up_process(hvcs_task);
}
static void hvcs_unthrottle(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&hvcsd->lock, flags);
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
hvcs_kick();
}
static void hvcs_throttle(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&hvcsd->lock, flags);
vio_disable_interrupts(hvcsd->vdev);
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
/*
* If the device is being removed we don't have to worry about this interrupt
* handler taking any further interrupts because they are disabled which means
* the hvcs_struct will always be valid in this handler.
*/
static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance)
{
struct hvcs_struct *hvcsd = dev_instance;
spin_lock(&hvcsd->lock);
vio_disable_interrupts(hvcsd->vdev);
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock(&hvcsd->lock);
hvcs_kick();
return IRQ_HANDLED;
}
/* This function must be called with the hvcsd->lock held */
static void hvcs_try_write(struct hvcs_struct *hvcsd)
{
uint32_t unit_address = hvcsd->vdev->unit_address;
struct tty_struct *tty = hvcsd->tty;
int sent;
if (hvcsd->todo_mask & HVCS_TRY_WRITE) {
/* won't send partial writes */
sent = hvc_put_chars(unit_address,
&hvcsd->buffer[0],
hvcsd->chars_in_buffer );
if (sent > 0) {
hvcsd->chars_in_buffer = 0;
/* wmb(); */
hvcsd->todo_mask &= ~(HVCS_TRY_WRITE);
/* wmb(); */
/*
* We are still obligated to deliver the data to the
* hypervisor even if the tty has been closed because
* we committed to delivering it. But don't try to wake
* a non-existent tty.
*/
if (tty) {
tty_wakeup(tty);
}
}
}
}
static int hvcs_io(struct hvcs_struct *hvcsd)
{
uint32_t unit_address;
struct tty_struct *tty;
char buf[HVCS_BUFF_LEN] __ALIGNED__;
unsigned long flags;
int got = 0;
spin_lock_irqsave(&hvcsd->lock, flags);
unit_address = hvcsd->vdev->unit_address;
tty = hvcsd->tty;
hvcs_try_write(hvcsd);
if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) {
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
goto bail;
} else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
goto bail;
/* remove the read masks */
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
if (tty_buffer_request_room(tty, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
got = hvc_get_chars(unit_address,
&buf[0],
HVCS_BUFF_LEN);
tty_insert_flip_string(tty, buf, got);
}
/* Give the TTY time to process the data we just sent. */
if (got)
hvcsd->todo_mask |= HVCS_QUICK_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
/* This is synchronous because tty->low_latency == 1 */
if(got)
tty_flip_buffer_push(tty);
if (!got) {
/* Do this _after_ the flip_buffer_push */
spin_lock_irqsave(&hvcsd->lock, flags);
vio_enable_interrupts(hvcsd->vdev);
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
return hvcsd->todo_mask;
bail:
spin_unlock_irqrestore(&hvcsd->lock, flags);
return hvcsd->todo_mask;
}
static int khvcsd(void *unused)
{
struct hvcs_struct *hvcsd;
int hvcs_todo_mask;
__set_current_state(TASK_RUNNING);
do {
hvcs_todo_mask = 0;
hvcs_kicked = 0;
wmb();
spin_lock(&hvcs_structs_lock);
list_for_each_entry(hvcsd, &hvcs_structs, next) {
hvcs_todo_mask |= hvcs_io(hvcsd);
}
spin_unlock(&hvcs_structs_lock);
/*
* If any of the hvcs adapters want to try a write or quick read
* don't schedule(), yield a smidgen then execute the hvcs_io
* thread again for those that want the write.
*/
if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) {
yield();
continue;
}
set_current_state(TASK_INTERRUPTIBLE);
if (!hvcs_kicked)
schedule();
__set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
return 0;
}
static struct vio_device_id hvcs_driver_table[] __devinitdata= {
{"serial-server", "hvterm2"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, hvcs_driver_table);
static void hvcs_return_index(int index)
{
/* Paranoia check */
if (!hvcs_index_list)
return;
if (index < 0 || index >= hvcs_index_count)
return;
if (hvcs_index_list[index] == -1)
return;
else
hvcs_index_list[index] = -1;
}
/* callback when the kref ref count reaches zero */
static void destroy_hvcs_struct(struct kref *kref)
{
struct hvcs_struct *hvcsd = from_kref(kref);
struct vio_dev *vdev;
unsigned long flags;
spin_lock(&hvcs_structs_lock);
spin_lock_irqsave(&hvcsd->lock, flags);
/* the list_del poisons the pointers */
list_del(&(hvcsd->next));
if (hvcsd->connected == 1) {
hvcs_partner_free(hvcsd);
printk(KERN_INFO "HVCS: Closed vty-server@%X and"
" partner vty@%X:%d connection.\n",
hvcsd->vdev->unit_address,
hvcsd->p_unit_address,
(uint32_t)hvcsd->p_partition_ID);
}
printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n",
hvcsd->vdev->unit_address);
vdev = hvcsd->vdev;
hvcsd->vdev = NULL;
hvcsd->p_unit_address = 0;
hvcsd->p_partition_ID = 0;
hvcs_return_index(hvcsd->index);
memset(&hvcsd->p_location_code[0], 0x00, HVCS_CLC_LENGTH + 1);
spin_unlock_irqrestore(&hvcsd->lock, flags);
spin_unlock(&hvcs_structs_lock);
sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
kfree(hvcsd);
}
static int hvcs_get_index(void)
{
int i;
/* Paranoia check */
if (!hvcs_index_list) {
printk(KERN_ERR "HVCS: hvcs_index_list NOT valid!.\n");
return -EFAULT;
}
/* Find the numerically lowest first free index. */
for(i = 0; i < hvcs_index_count; i++) {
if (hvcs_index_list[i] == -1) {
hvcs_index_list[i] = 0;
return i;
}
}
return -1;
}
static int __devinit hvcs_probe(
struct vio_dev *dev,
const struct vio_device_id *id)
{
struct hvcs_struct *hvcsd;
int index;
int retval;
if (!dev || !id) {
printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
return -EPERM;
}
/* early to avoid cleanup on failure */
index = hvcs_get_index();
if (index < 0) {
return -EFAULT;
}
hvcsd = kzalloc(sizeof(*hvcsd), GFP_KERNEL);
if (!hvcsd)
return -ENODEV;
spin_lock_init(&hvcsd->lock);
/* Automatically incs the refcount the first time */
kref_init(&hvcsd->kref);
hvcsd->vdev = dev;
dev_set_drvdata(&dev->dev, hvcsd);
hvcsd->index = index;
/* hvcsd->index = ++hvcs_struct_count; */
hvcsd->chars_in_buffer = 0;
hvcsd->todo_mask = 0;
hvcsd->connected = 0;
/*
* This will populate the hvcs_struct's partner info fields for the
* first time.
*/
if (hvcs_get_pi(hvcsd)) {
printk(KERN_ERR "HVCS: Failed to fetch partner"
" info for vty-server@%X on device probe.\n",
hvcsd->vdev->unit_address);
}
/*
* If a user app opens a tty that corresponds to this vty-server before
* the hvcs_struct has been added to the devices list then the user app
* will get -ENODEV.
*/
spin_lock(&hvcs_structs_lock);
list_add_tail(&(hvcsd->next), &hvcs_structs);
spin_unlock(&hvcs_structs_lock);
retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
if (retval) {
printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
hvcsd->vdev->unit_address);
return retval;
}
printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
/*
* DON'T enable interrupts here because there is no user to receive the
* data.
*/
return 0;
}
static int __devexit hvcs_remove(struct vio_dev *dev)
{
struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
unsigned long flags;
struct tty_struct *tty;
if (!hvcsd)
return -ENODEV;
/* By this time the vty-server won't be getting any more interrupts */
spin_lock_irqsave(&hvcsd->lock, flags);
tty = hvcsd->tty;
spin_unlock_irqrestore(&hvcsd->lock, flags);
/*
* Let the last holder of this object cause it to be removed, which
* would probably be tty_hangup below.
*/
kref_put(&hvcsd->kref, destroy_hvcs_struct);
/*
* The hangup is a scheduled function which will auto chain call
* hvcs_hangup. The tty should always be valid at this time unless a
* simultaneous tty close already cleaned up the hvcs_struct.
*/
if (tty)
tty_hangup(tty);
printk(KERN_INFO "HVCS: vty-server@%X removed from the"
" vio bus.\n", dev->unit_address);
return 0;
};
static struct vio_driver hvcs_vio_driver = {
.id_table = hvcs_driver_table,
.probe = hvcs_probe,
.remove = __devexit_p(hvcs_remove),
.driver = {
.name = hvcs_driver_name,
.owner = THIS_MODULE,
}
};
/* Only called from hvcs_get_pi please */
static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
{
int clclength;
hvcsd->p_unit_address = pi->unit_address;
hvcsd->p_partition_ID = pi->partition_ID;
clclength = strlen(&pi->location_code[0]);
if (clclength > HVCS_CLC_LENGTH)
clclength = HVCS_CLC_LENGTH;
/* copy the null-term char too */
strncpy(&hvcsd->p_location_code[0],
&pi->location_code[0], clclength + 1);
}
/*
* Traverse the list and add the partner info that is found to the hvcs_struct
* struct entry. NOTE: At this time I know that partner info will return a
* single entry but in the future there may be multiple partner info entries per
* vty-server and you'll want to zero out that list and reset it. If for some
* reason you have an old version of this driver but there IS more than one
* partner info then hvcsd->p_* will hold the last partner info data from the
* firmware query. A good way to update this code would be to replace the three
* partner info fields in hvcs_struct with a list of hvcs_partner_info
* instances.
*
* This function must be called with the hvcsd->lock held.
*/
static int hvcs_get_pi(struct hvcs_struct *hvcsd)
{
struct hvcs_partner_info *pi;
uint32_t unit_address = hvcsd->vdev->unit_address;
struct list_head head;
int retval;
spin_lock(&hvcs_pi_lock);
if (!hvcs_pi_buff) {
spin_unlock(&hvcs_pi_lock);
return -EFAULT;
}
retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff);
spin_unlock(&hvcs_pi_lock);
if (retval) {
printk(KERN_ERR "HVCS: Failed to fetch partner"
" info for vty-server@%x.\n", unit_address);
return retval;
}
/* nixes the values if the partner vty went away */
hvcsd->p_unit_address = 0;
hvcsd->p_partition_ID = 0;
list_for_each_entry(pi, &head, node)
hvcs_set_pi(pi, hvcsd);
hvcs_free_partner_info(&head);
return 0;
}
/*
* This function is executed by the driver "rescan" sysfs entry. It shouldn't
* be executed elsewhere, in order to prevent deadlock issues.
*/
static int hvcs_rescan_devices_list(void)
{
struct hvcs_struct *hvcsd;
unsigned long flags;
spin_lock(&hvcs_structs_lock);
list_for_each_entry(hvcsd, &hvcs_structs, next) {
spin_lock_irqsave(&hvcsd->lock, flags);
hvcs_get_pi(hvcsd);
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
spin_unlock(&hvcs_structs_lock);
return 0;
}
/*
* Farm this off into its own function because it could be more complex once
* multiple partners support is added. This function should be called with
* the hvcsd->lock held.
*/
static int hvcs_has_pi(struct hvcs_struct *hvcsd)
{
if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID))
return 0;
return 1;
}
/*
* NOTE: It is possible that the super admin removed a partner vty and then
* added a different vty as the new partner.
*
* This function must be called with the hvcsd->lock held.
*/
static int hvcs_partner_connect(struct hvcs_struct *hvcsd)
{
int retval;
unsigned int unit_address = hvcsd->vdev->unit_address;
/*
* If there wasn't any pi when the device was added it doesn't mean
* there isn't any now. This driver isn't notified when a new partner
* vty is added to a vty-server so we discover changes on our own.
* Please see comments in hvcs_register_connection() for justification
* of this bizarre code.
*/
retval = hvcs_register_connection(unit_address,
hvcsd->p_partition_ID,
hvcsd->p_unit_address);
if (!retval) {
hvcsd->connected = 1;
return 0;
} else if (retval != -EINVAL)
return retval;
/*
* As per the spec re-get the pi and try again if -EINVAL after the
* first connection attempt.
*/
if (hvcs_get_pi(hvcsd))
return -ENOMEM;
if (!hvcs_has_pi(hvcsd))
return -ENODEV;
retval = hvcs_register_connection(unit_address,
hvcsd->p_partition_ID,
hvcsd->p_unit_address);
if (retval != -EINVAL) {
hvcsd->connected = 1;
return retval;
}
/*
* EBUSY is the most likely scenario though the vty could have been
* removed or there really could be an hcall error due to the parameter
* data but thanks to ambiguous firmware return codes we can't really
* tell.
*/
printk(KERN_INFO "HVCS: vty-server or partner"
" vty is busy. Try again later.\n");
return -EBUSY;
}
/* This function must be called with the hvcsd->lock held */
static void hvcs_partner_free(struct hvcs_struct *hvcsd)
{
int retval;
do {
retval = hvcs_free_connection(hvcsd->vdev->unit_address);
} while (retval == -EBUSY);
hvcsd->connected = 0;
}
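/*
 * Locking sketch (illustrative only): both helpers above expect hvcsd->lock
 * to be held by the caller, as hvcs_open() does around
 * hvcs_partner_connect() and hvcs_enable_device() does around
 * hvcs_partner_free() when the irq setup fails.
 *
 * spin_lock_irqsave(&hvcsd->lock, flags);
 * retval = hvcs_partner_connect(hvcsd);
 * spin_unlock_irqrestore(&hvcsd->lock, flags);
 */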
/* This helper function must be called WITHOUT the hvcsd->lock held */
static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
unsigned int irq, struct vio_dev *vdev)
{
unsigned long flags;
int rc;
/*
* It is possible that the vty-server was removed between the time that
* the conn was registered and now.
*/
if (!(rc = request_irq(irq, &hvcs_handle_interrupt,
IRQF_DISABLED, "ibmhvcs", hvcsd))) {
/*
* It is possible the vty-server was removed after the irq was
* requested but before we have time to enable interrupts.
*/
if (vio_enable_interrupts(vdev) == H_SUCCESS)
return 0;
else {
printk(KERN_ERR "HVCS: int enable failed for"
" vty-server@%X.\n", unit_address);
free_irq(irq, hvcsd);
}
} else
printk(KERN_ERR "HVCS: irq req failed for"
" vty-server@%X.\n", unit_address);
spin_lock_irqsave(&hvcsd->lock, flags);
hvcs_partner_free(hvcsd);
spin_unlock_irqrestore(&hvcsd->lock, flags);
return rc;
}
/*
* This always increments the kref ref count if the call is successful.
* Please remember to dec when you are done with the instance.
*
* NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
* calling this function or you will get deadlock.
*/
static struct hvcs_struct *hvcs_get_by_index(int index)
{
struct hvcs_struct *hvcsd = NULL;
unsigned long flags;
spin_lock(&hvcs_structs_lock);
/* We can immediately discard OOB requests */
if (index >= 0 && index < HVCS_MAX_SERVER_ADAPTERS) {
list_for_each_entry(hvcsd, &hvcs_structs, next) {
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->index == index) {
kref_get(&hvcsd->kref);
spin_unlock_irqrestore(&hvcsd->lock, flags);
spin_unlock(&hvcs_structs_lock);
return hvcsd;
}
spin_unlock_irqrestore(&hvcsd->lock, flags);
}
hvcsd = NULL;
}
spin_unlock(&hvcs_structs_lock);
return hvcsd;
}
/*
* This is invoked via the tty_open interface when a user app connects to the
* /dev node.
*/
static int hvcs_open(struct tty_struct *tty, struct file *filp)
{
struct hvcs_struct *hvcsd;
int rc, retval = 0;
unsigned long flags;
unsigned int irq;
struct vio_dev *vdev;
unsigned long unit_address;
if (tty->driver_data)
goto fast_open;
/*
* Is there a vty-server that shares the same index?
* This function increments the kref reference count.
*/
if (!(hvcsd = hvcs_get_by_index(tty->index))) {
printk(KERN_WARNING "HVCS: open failed, no device associated"
" with tty->index %d.\n", tty->index);
return -ENODEV;
}
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->connected == 0)
if ((retval = hvcs_partner_connect(hvcsd)))
goto error_release;
hvcsd->open_count = 1;
hvcsd->tty = tty;
tty->driver_data = hvcsd;
memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
/*
* Save these in the spinlock for the enable operations that need them
* outside of the spinlock.
*/
irq = hvcsd->vdev->irq;
vdev = hvcsd->vdev;
unit_address = hvcsd->vdev->unit_address;
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
/*
* This must be done outside of the spinlock because it requests irqs
* and will grab the spinlock and free the connection if it fails.
*/
if (((rc = hvcs_enable_device(hvcsd, unit_address, irq, vdev)))) {
kref_put(&hvcsd->kref, destroy_hvcs_struct);
printk(KERN_WARNING "HVCS: enable device failed.\n");
return rc;
}
goto open_success;
fast_open:
hvcsd = tty->driver_data;
spin_lock_irqsave(&hvcsd->lock, flags);
kref_get(&hvcsd->kref);
hvcsd->open_count++;
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
open_success:
hvcs_kick();
printk(KERN_INFO "HVCS: vty-server@%X connection opened.\n",
hvcsd->vdev->unit_address );
return 0;
error_release:
spin_unlock_irqrestore(&hvcsd->lock, flags);
kref_put(&hvcsd->kref, destroy_hvcs_struct);
printk(KERN_WARNING "HVCS: partner connect failed.\n");
return retval;
}
static void hvcs_close(struct tty_struct *tty, struct file *filp)
{
struct hvcs_struct *hvcsd;
unsigned long flags;
int irq = NO_IRQ;
/*
* Is someone trying to close the file associated with this device after
* we have hung up? If so, tty->driver_data wouldn't be valid.
*/
if (tty_hung_up_p(filp))
return;
/*
* No driver_data means that this close was probably issued after a
* failed hvcs_open by the tty layer's release_dev() api and we can just
* exit cleanly.
*/
if (!tty->driver_data)
return;
hvcsd = tty->driver_data;
spin_lock_irqsave(&hvcsd->lock, flags);
if (--hvcsd->open_count == 0) {
vio_disable_interrupts(hvcsd->vdev);
/*
* NULL this early so that the kernel_thread doesn't try to
* execute any operations on the TTY even though it is obligated
* to deliver any pending I/O to the hypervisor.
*/
hvcsd->tty = NULL;
irq = hvcsd->vdev->irq;
spin_unlock_irqrestore(&hvcsd->lock, flags);
tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
/*
* This line is important because it tells hvcs_open that this
* device needs to be re-configured the next time hvcs_open is
* called.
*/
tty->driver_data = NULL;
free_irq(irq, hvcsd);
kref_put(&hvcsd->kref, destroy_hvcs_struct);
return;
} else if (hvcsd->open_count < 0) {
printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
" is missmanaged.\n",
hvcsd->vdev->unit_address, hvcsd->open_count);
}
spin_unlock_irqrestore(&hvcsd->lock, flags);
kref_put(&hvcsd->kref, destroy_hvcs_struct);
}
static void hvcs_hangup(struct tty_struct * tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
int temp_open_count;
int irq = NO_IRQ;
spin_lock_irqsave(&hvcsd->lock, flags);
/* Preserve this so that we know how many kref refs to put */
temp_open_count = hvcsd->open_count;
/*
* Don't kref put inside the spinlock because the destruction
* callback may use the spinlock and it may get called before the
* spinlock has been released.
*/
vio_disable_interrupts(hvcsd->vdev);
hvcsd->todo_mask = 0;
/* I don't think the tty needs the hvcs_struct pointer after a hangup */
hvcsd->tty->driver_data = NULL;
hvcsd->tty = NULL;
hvcsd->open_count = 0;
/* This will drop any buffered data on the floor which is OK in a hangup
* scenario. */
memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
hvcsd->chars_in_buffer = 0;
irq = hvcsd->vdev->irq;
spin_unlock_irqrestore(&hvcsd->lock, flags);
free_irq(irq, hvcsd);
/*
* We need to kref_put() for every open_count we have since the
* tty_hangup() function doesn't invoke a close per open connection on a
* non-console device.
*/
while (temp_open_count) {
--temp_open_count;
/*
* The final put will trigger destruction of the hvcs_struct.
* NOTE: If this hangup was signaled from user space then the
* final put will never happen.
*/
kref_put(&hvcsd->kref, destroy_hvcs_struct);
}
}
/*
* NOTE: This is almost always from_user since user level apps interact with the
* /dev nodes. I'm trusting that if hvcs_write gets called and interrupted by
* hvcs_remove (which removes the target device and executes tty_hangup()) that
* tty_hangup will allow hvcs_write time to complete execution before it
* terminates our device.
*/
static int hvcs_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned int unit_address;
const unsigned char *charbuf;
unsigned long flags;
int total_sent = 0;
int tosend = 0;
int result = 0;
/*
* If they don't check the return code off of their open they may
* attempt this even if there is no connected device.
*/
if (!hvcsd)
return -ENODEV;
/* Reasonable size to prevent user level flooding */
if (count > HVCS_MAX_FROM_USER) {
printk(KERN_WARNING "HVCS write: count being truncated to"
" HVCS_MAX_FROM_USER.\n");
count = HVCS_MAX_FROM_USER;
}
charbuf = buf;
spin_lock_irqsave(&hvcsd->lock, flags);
/*
* Somehow an open succeeded but the device was removed or the
* connection terminated between the vty-server and partner vty during
* the middle of a write operation? This is a crummy place to do this
* but we want to keep it all in the spinlock.
*/
if (hvcsd->open_count <= 0) {
spin_unlock_irqrestore(&hvcsd->lock, flags);
return -ENODEV;
}
unit_address = hvcsd->vdev->unit_address;
while (count > 0) {
tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer));
/*
* No more space, this probably means that the last call to
* hvcs_write() didn't succeed and the buffer was filled up.
*/
if (!tosend)
break;
memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer],
&charbuf[total_sent],
tosend);
hvcsd->chars_in_buffer += tosend;
result = 0;
/*
* If this is true then we don't want to try writing to the
* hypervisor because that is the kernel_threads job now. We'll
* just add to the buffer.
*/
if (!(hvcsd->todo_mask & HVCS_TRY_WRITE))
/* won't send partial writes */
result = hvc_put_chars(unit_address,
&hvcsd->buffer[0],
hvcsd->chars_in_buffer);
/*
* Since we know we have enough room in hvcsd->buffer for
* tosend we record that it was sent regardless of whether the
* hypervisor actually took it because we have it buffered.
*/
total_sent += tosend;
count -= tosend;
if (result == 0) {
hvcsd->todo_mask |= HVCS_TRY_WRITE;
hvcs_kick();
break;
}
hvcsd->chars_in_buffer = 0;
/*
* Test after the chars_in_buffer reset otherwise this could
* deadlock our writes if hvc_put_chars fails.
*/
if (result < 0)
break;
}
spin_unlock_irqrestore(&hvcsd->lock, flags);
if (result == -1)
return -EIO;
else
return total_sent;
}
/*
* This is really asking how much can we guarantee that we can send or that we
* absolutely WILL BUFFER if we can't send it. This driver MUST honor the
* return value, hence the reason for hvcs_struct buffering.
*/
static int hvcs_write_room(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
if (!hvcsd || hvcsd->open_count <= 0)
return 0;
return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
}
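/*
 * Caller-side sketch (illustrative only): a writer that honors the contract
 * described above never hands hvcs_write() more bytes than hvcs_write_room()
 * just reported, so everything accepted can be buffered.
 *
 * int room = hvcs_write_room(tty);
 * int n = min(room, len);
 * if (n > 0)
 * n = hvcs_write(tty, buf, n);
 */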
static int hvcs_chars_in_buffer(struct tty_struct *tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
return hvcsd->chars_in_buffer;
}
static const struct tty_operations hvcs_ops = {
.open = hvcs_open,
.close = hvcs_close,
.hangup = hvcs_hangup,
.write = hvcs_write,
.write_room = hvcs_write_room,
.chars_in_buffer = hvcs_chars_in_buffer,
.unthrottle = hvcs_unthrottle,
.throttle = hvcs_throttle,
};
static int hvcs_alloc_index_list(int n)
{
int i;
hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count), GFP_KERNEL);
if (!hvcs_index_list)
return -ENOMEM;
hvcs_index_count = n;
for (i = 0; i < hvcs_index_count; i++)
hvcs_index_list[i] = -1;
return 0;
}
static void hvcs_free_index_list(void)
{
/* Paranoia check to be thorough. */
kfree(hvcs_index_list);
hvcs_index_list = NULL;
hvcs_index_count = 0;
}
static int __init hvcs_module_init(void)
{
int rc;
int num_ttys_to_alloc;
printk(KERN_INFO "Initializing %s\n", hvcs_driver_string);
/* Has the user specified an overload with an insmod param? */
if (hvcs_parm_num_devs <= 0 ||
(hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) {
num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS;
} else
num_ttys_to_alloc = hvcs_parm_num_devs;
hvcs_tty_driver = alloc_tty_driver(num_ttys_to_alloc);
if (!hvcs_tty_driver)
return -ENOMEM;
if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
rc = -ENOMEM;
goto index_fail;
}
hvcs_tty_driver->owner = THIS_MODULE;
hvcs_tty_driver->driver_name = hvcs_driver_name;
hvcs_tty_driver->name = hvcs_device_node;
/*
* We'll let the system assign us a major number, indicated by leaving
* it blank.
*/
hvcs_tty_driver->minor_start = HVCS_MINOR_START;
hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
/*
* We roll our own so that we DON'T ECHO. We can't echo because the
* device we are connecting to already echoes by default and this would
* throw us into a horrible recursive echo-echo-echo loop.
*/
hvcs_tty_driver->init_termios = hvcs_tty_termios;
hvcs_tty_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(hvcs_tty_driver, &hvcs_ops);
/*
* The following call will result in sysfs entries that denote the
* dynamically assigned major and minor numbers for our devices.
*/
if (tty_register_driver(hvcs_tty_driver)) {
printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
rc = -EIO;
goto register_fail;
}
hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!hvcs_pi_buff) {
rc = -ENOMEM;
goto buff_alloc_fail;
}
hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
if (IS_ERR(hvcs_task)) {
printk(KERN_ERR "HVCS: khvcsd creation failed. Driver not loaded.\n");
rc = -EIO;
goto kthread_fail;
}
rc = vio_register_driver(&hvcs_vio_driver);
if (rc) {
printk(KERN_ERR "HVCS: can't register vio driver\n");
goto vio_fail;
}
/*
* This needs to be done AFTER the vio_register_driver() call or else
* the kobjects won't be initialized properly.
*/
rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
if (rc) {
printk(KERN_ERR "HVCS: sysfs attr create failed\n");
goto attr_fail;
}
printk(KERN_INFO "HVCS: driver module inserted.\n");
return 0;
attr_fail:
vio_unregister_driver(&hvcs_vio_driver);
vio_fail:
kthread_stop(hvcs_task);
kthread_fail:
kfree(hvcs_pi_buff);
buff_alloc_fail:
tty_unregister_driver(hvcs_tty_driver);
register_fail:
hvcs_free_index_list();
index_fail:
put_tty_driver(hvcs_tty_driver);
hvcs_tty_driver = NULL;
return rc;
}
static void __exit hvcs_module_exit(void)
{
/*
* This driver receives hvcs_remove callbacks for each device upon
* module removal.
*/
/*
* This synchronous operation will wake the khvcsd kthread if it is
* asleep and will return when khvcsd has terminated.
*/
kthread_stop(hvcs_task);
spin_lock(&hvcs_pi_lock);
kfree(hvcs_pi_buff);
hvcs_pi_buff = NULL;
spin_unlock(&hvcs_pi_lock);
driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
vio_unregister_driver(&hvcs_vio_driver);
tty_unregister_driver(hvcs_tty_driver);
hvcs_free_index_list();
put_tty_driver(hvcs_tty_driver);
printk(KERN_INFO "HVCS: driver module removed.\n");
}
module_init(hvcs_module_init);
module_exit(hvcs_module_exit);
| gpl-2.0 |
buenajuan300/android_kernel_samsung_grandprimevelte | drivers/scsi/aacraid/commsup.c | 1097 | 55345 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* commsup.c
*
* Abstract: Contain all routines that are required for FSA host/adapter
* communication.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "aacraid.h"
/**
* fib_map_alloc - allocate the fib objects
* @dev: Adapter to allocate for
*
* Allocate and map the shared PCI space for the FIB blocks used to
* talk to the Adaptec firmware.
*/
static int fib_map_alloc(struct aac_dev *dev)
{
dprintk((KERN_INFO
"allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
(dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
&dev->hw_fib_pa);
if (dev->hw_fib_va == NULL)
return -ENOMEM;
return 0;
}
/**
* aac_fib_map_free - free the fib objects
* @dev: Adapter to free
*
* Free the PCI mappings and the memory allocated for FIB blocks
* on this adapter.
*/
void aac_fib_map_free(struct aac_dev *dev)
{
pci_free_consistent(dev->pdev,
dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
dev->hw_fib_va, dev->hw_fib_pa);
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
/**
* aac_fib_setup - setup the fibs
* @dev: Adapter to set up
*
* Allocate the PCI space for the fibs, map it and then initialise the
* fib area, the unmapped fib data and also the free list
*/
int aac_fib_setup(struct aac_dev * dev)
{
struct fib *fibptr;
struct hw_fib *hw_fib;
dma_addr_t hw_fib_pa;
int i;
while (((i = fib_map_alloc(dev)) == -ENOMEM)
&& (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
}
if (i<0)
return -ENOMEM;
/* 32 byte alignment for PMC */
hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
(hw_fib_pa - dev->hw_fib_pa));
dev->hw_fib_pa = hw_fib_pa;
memset(dev->hw_fib_va, 0,
(dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
/* add Xport header */
dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
sizeof(struct aac_fib_xporthdr));
dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
hw_fib = dev->hw_fib_va;
hw_fib_pa = dev->hw_fib_pa;
/*
* Initialise the fibs
*/
for (i = 0, fibptr = &dev->fibs[i];
i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
i++, fibptr++)
{
fibptr->flags = 0;
fibptr->dev = dev;
fibptr->hw_fib_va = hw_fib;
fibptr->data = (void *) fibptr->hw_fib_va->data;
fibptr->next = fibptr+1; /* Forward chain the fibs */
sema_init(&fibptr->event_wait, 0);
spin_lock_init(&fibptr->event_lock);
hw_fib->header.XferState = cpu_to_le32(0xffffffff);
hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
fibptr->hw_fib_pa = hw_fib_pa;
hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
hw_fib_pa = hw_fib_pa +
dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
/*
* Add the fib chain to the free list
*/
dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
/*
* Enable this to debug out of queue space
*/
dev->free_fib = &dev->fibs[0];
return 0;
}
/**
* aac_fib_alloc - allocate a fib
* @dev: Adapter to allocate the fib for
*
* Allocate a fib from the adapter fib pool. If the pool is empty we
* return NULL.
*/
struct fib *aac_fib_alloc(struct aac_dev *dev)
{
struct fib * fibptr;
unsigned long flags;
spin_lock_irqsave(&dev->fib_lock, flags);
fibptr = dev->free_fib;
if (!fibptr) {
spin_unlock_irqrestore(&dev->fib_lock, flags);
return fibptr;
}
dev->free_fib = fibptr->next;
spin_unlock_irqrestore(&dev->fib_lock, flags);
/*
* Set the proper node type code and node byte size
*/
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
fibptr->size = sizeof(struct fib);
/*
* Null out fields that depend on being zero at the start of
* each I/O
*/
fibptr->hw_fib_va->header.XferState = 0;
fibptr->flags = 0;
fibptr->callback = NULL;
fibptr->callback_data = NULL;
return fibptr;
}
/**
* aac_fib_free - free a fib
* @fibptr: fib to free up
*
* Frees up a fib and places it on the appropriate queue
*/
void aac_fib_free(struct fib *fibptr)
{
unsigned long flags, flagsv;
spin_lock_irqsave(&fibptr->event_lock, flagsv);
if (fibptr->done == 2) {
spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
return;
}
spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
aac_config.fib_timeouts++;
if (fibptr->hw_fib_va->header.XferState != 0) {
printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
(void*)fibptr,
le32_to_cpu(fibptr->hw_fib_va->header.XferState));
}
fibptr->next = fibptr->dev->free_fib;
fibptr->dev->free_fib = fibptr;
spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
* aac_fib_init - initialise a fib
* @fibptr: The fib to initialize
*
* Set up the generic fib fields ready for use
*/
void aac_fib_init(struct fib *fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib_va;
memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
hw_fib->header.StructType = FIB_MAGIC;
hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
/**
* fib_deallocate - deallocate a fib
* @fibptr: fib to deallocate
*
* Will deallocate and return to the free pool the FIB pointed to by the
* caller.
*/
static void fib_dealloc(struct fib * fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib_va;
hw_fib->header.XferState = 0;
}
/*
* Communication primitives define and support the queuing method we use to
* support host to adapter communication. All queue accesses happen through
* these routines, which are the only routines that have knowledge of how
* these queues are implemented.
*/
/**
* aac_get_entry - get a queue entry
* @dev: Adapter
* @qid: Queue Number
* @entry: Entry return
* @index: Index return
* @nonotify: notification control
*
* With a priority the routine returns a queue entry if the queue has free entries. If the queue
* is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
* returned.
*/
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
struct aac_queue * q;
unsigned long idx;
/*
* All of the queues wrap when they reach the end, so we check
* to see if they have reached the end and if they have we just
* set the index back to zero. This is a wrap. You could or off
* the high bits in all updates but this is a bit faster I think.
*/
q = &dev->queues->queue[qid];
idx = *index = le32_to_cpu(*(q->headers.producer));
/* Interrupt Moderation, only interrupt for first two entries */
if (idx != le32_to_cpu(*(q->headers.consumer))) {
if (--idx == 0) {
if (qid == AdapNormCmdQueue)
idx = ADAP_NORM_CMD_ENTRIES;
else
idx = ADAP_NORM_RESP_ENTRIES;
}
if (idx != le32_to_cpu(*(q->headers.consumer)))
*nonotify = 1;
}
if (qid == AdapNormCmdQueue) {
if (*index >= ADAP_NORM_CMD_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
} else {
if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
/* Queue is full */
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
qid, q->numpending);
return 0;
} else {
*entry = q->base + *index;
return 1;
}
}
/**
* aac_queue_get - get the next free QE
* @dev: Adapter
* @index: Returned index
* @priority: Priority of fib
* @fib: Fib to associate with the queue entry
* @wait: Wait if queue full
* @fibptr: Driver fib object to go with fib
* @nonotify: Don't notify the adapter
*
* Gets the next free QE off the requested priority adapter command
* queue and associates the Fib with the QE. The QE represented by
* index is ready to insert on the queue when this routine returns
* success.
*/
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
struct aac_entry * entry = NULL;
int map = 0;
if (qid == AdapNormCmdQueue) {
/* if no entries wait for some if caller wants to */
while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
printk(KERN_ERR "GetEntries failed\n");
}
/*
* Setup queue entry with a command, status and fib mapped
*/
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
map = 1;
} else {
while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
/* if no entries wait for some if caller wants to */
}
/*
* Setup queue entry with command, status and fib mapped
*/
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
entry->addr = hw_fib->header.SenderFibAddress;
/* Restore adapters pointer to the FIB */
hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter know where to find its data */
map = 0;
}
/*
* If MapFib is true then we need to map the Fib and put pointers
* in the queue entry.
*/
if (map)
entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
return 0;
}
/*
* Define the highest level of host to adapter communication routines.
* These routines will support host to adapter FS communication. These
* routines have no knowledge of the communication method used. This level
* sends and receives FIBs. This level has no knowledge of how these FIBs
* get passed back and forth.
*/
/**
* aac_fib_send - send a fib to the adapter
* @command: Command to send
* @fibptr: The fib
* @size: Size of fib data area
* @priority: Priority of Fib
* @wait: Async/sync select
* @reply: True if a reply is wanted
* @callback: Called with reply
* @callback_data: Passed to callback
*
* Sends the requested FIB to the adapter and optionally will wait for a
* response FIB. If the caller does not wish to wait for a response then
* an event to wait on must be supplied. This event will be set when a
* response FIB is received from the adapter.
*/
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
int priority, int wait, int reply, fib_callback callback,
void *callback_data)
{
struct aac_dev * dev = fibptr->dev;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
unsigned long flags = 0;
unsigned long qflags;
unsigned long mflags = 0;
unsigned long sflags = 0;
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
/*
* There are 5 cases with the wait and response requested flags.
* The only invalid cases are if the caller requests to wait and
* does not request a response and if the caller does not want a
* response and the Fib is not allocated from pool. If a response
* is not requested the Fib will just be deallocated by the DPC
* routine when the response comes back from the adapter. No
* further processing will be done besides deleting the Fib. We
* will have a debug mode where the adapter can notify the host
* it had a problem and the host can log that fact.
*/
fibptr->flags = 0;
if (wait && !reply) {
return -EINVAL;
} else if (!wait && reply) {
hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
} else if (!wait && !reply) {
hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
} else if (wait && reply) {
hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NormalSent);
}
/*
* Map the fib into 32bits by using the fib number
*/
hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
/*
* Set FIB state to indicate where it came from and if we want a
* response from the adapter. Also load the command from the
* caller.
*
* Map the hw fib pointer as a 32bit value
*/
hw_fib->header.Command = cpu_to_le16(command);
hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
/*
* Set the size of the Fib we want to send to the adapter
*/
hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
return -EMSGSIZE;
}
/*
* Get a queue entry, connect the FIB to it, and then notify
* the adapter that a command is ready.
*/
hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.
*/
if (!wait) {
fibptr->callback = callback;
fibptr->callback_data = callback_data;
fibptr->flags = FIB_CONTEXT_FLAG;
}
fibptr->done = 0;
FIB_COUNTER_INCREMENT(aac_config.FibsSent);
dprintk((KERN_DEBUG "Fib contents:.\n"));
dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va));
dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
if (!dev->queues)
return -EBUSY;
if (wait) {
spin_lock_irqsave(&dev->manage_lock, mflags);
if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
printk(KERN_INFO "No management Fibs Available:%d\n",
dev->management_fib_count);
spin_unlock_irqrestore(&dev->manage_lock, mflags);
return -EBUSY;
}
dev->management_fib_count++;
spin_unlock_irqrestore(&dev->manage_lock, mflags);
spin_lock_irqsave(&fibptr->event_lock, flags);
}
if (dev->sync_mode) {
if (wait)
spin_unlock_irqrestore(&fibptr->event_lock, flags);
spin_lock_irqsave(&dev->sync_lock, sflags);
if (dev->sync_fib) {
list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
spin_unlock_irqrestore(&dev->sync_lock, sflags);
} else {
dev->sync_fib = fibptr;
spin_unlock_irqrestore(&dev->sync_lock, sflags);
aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
if (wait) {
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
if (down_interruptible(&fibptr->event_wait)) {
fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
return -EFAULT;
}
return 0;
}
return -EINPROGRESS;
}
if (aac_adapter_deliver(fibptr) != 0) {
printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
spin_lock_irqsave(&dev->manage_lock, mflags);
dev->management_fib_count--;
spin_unlock_irqrestore(&dev->manage_lock, mflags);
}
return -EBUSY;
}
/*
* If the caller wanted us to wait for response wait now.
*/
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
/* Only set for first known interruptible command */
if (wait < 0) {
/*
* *VERY* Dangerous to time out a command, the
* assumption is made that we have no hope of
* functioning because an interrupt routing or other
* hardware failure has occurred.
*/
unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
while (down_trylock(&fibptr->event_wait)) {
int blink;
if (time_is_before_eq_jiffies(timeout)) {
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
spin_unlock_irqrestore(q->lock, qflags);
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
"Usually a result of a PCI interrupt routing problem;\n"
"update mother board BIOS or consider utilizing one of\n"
"the SAFE mode kernel options (acpi, apic etc)\n");
}
return -ETIMEDOUT;
}
if ((blink = aac_adapter_check_health(dev)) > 0) {
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
"Usually a result of a serious unrecoverable hardware problem\n",
blink);
}
return -EFAULT;
}
/* We used to udelay() here but that absorbed
* a CPU when a timeout occurred. Not very
* useful. */
cpu_relax();
}
} else if (down_interruptible(&fibptr->event_wait)) {
/* Do nothing ... satisfy
* down_interruptible must_check */
}
spin_lock_irqsave(&fibptr->event_lock, flags);
if (fibptr->done == 0) {
fibptr->done = 2; /* Tell interrupt we aborted */
spin_unlock_irqrestore(&fibptr->event_lock, flags);
return -ERESTARTSYS;
}
spin_unlock_irqrestore(&fibptr->event_lock, flags);
BUG_ON(fibptr->done == 0);
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
return -ETIMEDOUT;
return 0;
}
/*
* If the user does not want a response then return success, otherwise
* return pending
*/
if (reply)
return -EINPROGRESS;
else
return 0;
}
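/*
 * FIB lifecycle sketch (illustrative only; "datasize" below stands for the
 * caller's payload size): a typical synchronous request built from the
 * primitives in this file, mirroring the CT_PAUSE_IO request issued from
 * aac_reset_adapter() further down.
 *
 * struct fib *fibptr = aac_fib_alloc(dev);
 * if (!fibptr)
 * return -ENOMEM;
 * aac_fib_init(fibptr);
 * ... fill in fib_data(fibptr) ...
 * status = aac_fib_send(ContainerCommand, fibptr, datasize,
 * FsaNormal, 1, 1, NULL, NULL);
 * if (status >= 0)
 * aac_fib_complete(fibptr);
 * if (status != -ERESTARTSYS)
 * aac_fib_free(fibptr);
 */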
/**
* aac_consumer_get - get the top of the queue
* @dev: Adapter
* @q: Queue
* @entry: Return entry
*
* Will return a pointer to the entry on the top of the queue requested that
* we are a consumer of, and return the address of the queue entry. It does
* not change the state of the queue.
*/
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
u32 index;
int status;
if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
status = 0;
} else {
/*
* The consumer index must be wrapped if we have reached
* the end of the queue, else we just use the entry
* pointed to by the header index
*/
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
index = 0;
else
index = le32_to_cpu(*q->headers.consumer);
*entry = q->base + index;
status = 1;
}
return(status);
}
/**
* aac_consumer_free - free consumer entry
* @dev: Adapter
* @q: Queue
* @qid: Queue ident
*
* Frees up the current top of the queue we are a consumer of. If the
* queue was full notify the producer that the queue is no longer full.
*/
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
int wasfull = 0;
u32 notify;
if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
wasfull = 1;
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
*q->headers.consumer = cpu_to_le32(1);
else
le32_add_cpu(q->headers.consumer, 1);
if (wasfull) {
switch (qid) {
case HostNormCmdQueue:
notify = HostNormCmdNotFull;
break;
case HostNormRespQueue:
notify = HostNormRespNotFull;
break;
default:
BUG();
return;
}
aac_adapter_notify(dev, notify);
}
}
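/*
 * Consumer-side sketch (illustrative only): draining a queue we consume
 * with the two helpers above; the entry is released back to the producer
 * only after it has been processed.
 *
 * while (aac_consumer_get(dev, q, &entry)) {
 * ... process *entry ...
 * aac_consumer_free(dev, q, qid);
 * }
 */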
/**
* aac_fib_adapter_complete - complete adapter issued fib
* @fibptr: fib to complete
* @size: size of fib
*
* Will do all necessary work to complete a FIB that was sent from
* the adapter.
*/
int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
struct aac_dev * dev = fibptr->dev;
struct aac_queue * q;
unsigned long nointr = 0;
unsigned long qflags;
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
kfree(hw_fib);
return 0;
}
if (hw_fib->header.XferState == 0) {
if (dev->comm_interface == AAC_COMM_MESSAGE)
kfree(hw_fib);
return 0;
}
/*
* If we plan to do anything check the structure type first.
*/
if (hw_fib->header.StructType != FIB_MAGIC &&
hw_fib->header.StructType != FIB_MAGIC2 &&
hw_fib->header.StructType != FIB_MAGIC2_64) {
if (dev->comm_interface == AAC_COMM_MESSAGE)
kfree(hw_fib);
return -EINVAL;
}
/*
* This block handles the case where the adapter had sent us a
* command and we have finished processing the command. We
* call completeFib when we are done processing the command
* and want to send a response back to the adapter. This will
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
if (dev->comm_interface == AAC_COMM_MESSAGE) {
kfree (hw_fib);
} else {
u32 index;
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
if (size) {
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
}
q = &dev->queues->queue[AdapNormRespQueue];
spin_lock_irqsave(q->lock, qflags);
aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & (int)aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormRespQueue);
}
} else {
printk(KERN_WARNING "aac_fib_adapter_complete: "
"Unknown xferstate detected.\n");
BUG();
}
return 0;
}
/**
* aac_fib_complete - fib completion handler
* @fib: FIB to complete
*
* Will do all necessary work to complete a FIB.
*/
int aac_fib_complete(struct fib *fibptr)
{
unsigned long flags;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
/*
* Check for a fib which has already been completed
*/
if (hw_fib->header.XferState == 0)
return 0;
/*
* If we plan to do anything check the structure type first.
*/
if (hw_fib->header.StructType != FIB_MAGIC &&
hw_fib->header.StructType != FIB_MAGIC2 &&
hw_fib->header.StructType != FIB_MAGIC2_64)
return -EINVAL;
/*
* This block completes a cdb which originated on the host and we
* just need to deallocate the cdb or reinit it. At this point the
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
spin_lock_irqsave(&fibptr->event_lock, flags);
if (fibptr->done == 2) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
return 0;
}
spin_unlock_irqrestore(&fibptr->event_lock, flags);
if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
{
fib_dealloc(fibptr);
}
else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
{
/*
* This handles the case when the host has aborted the I/O
* to the adapter because the adapter is not responding
*/
fib_dealloc(fibptr);
} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
fib_dealloc(fibptr);
} else {
BUG();
}
return 0;
}
/**
* aac_printf - handle printf from firmware
* @dev: Adapter
* @val: Message info
*
* Print a message passed to us by the controller firmware on the
* Adaptec board
*/
void aac_printf(struct aac_dev *dev, u32 val)
{
char *cp = dev->printfbuf;
if (dev->printf_enabled)
{
int length = val & 0xffff;
int level = (val >> 16) & 0xffff;
/*
* The size of the printfbuf is set in port.c
* There is no variable or define for it
*/
if (length > 255)
length = 255;
if (cp[length] != 0)
cp[length] = 0;
if (level == LOG_AAC_HIGH_ERROR)
printk(KERN_WARNING "%s:%s", dev->name, cp);
else
printk(KERN_INFO "%s:%s", dev->name, cp);
}
memset(cp, 0, 256);
}
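/*
 * Worked example (illustrative only): for val = 0x00010005 the low 16 bits
 * give a message length of 5 and the high 16 bits a level of 1, so a
 * 5-character string from printfbuf is logged at KERN_INFO unless the level
 * equals LOG_AAC_HIGH_ERROR, in which case KERN_WARNING is used.
 */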
/**
* aac_handle_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
* @fibptr: Pointer to fibptr from adapter
*
* This routine handles a driver notify fib from the adapter and
* dispatches it to the appropriate routine for handling.
*/
#define AIF_SNIFF_TIMEOUT (30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
u32 channel, id, lun, container;
struct scsi_device *device;
enum {
NOTHING,
DELETE,
ADD,
CHANGE
} device_config_needed = NOTHING;
/* Sniff for container changes */
if (!dev || !dev->fsa_dev)
return;
container = channel = id = lun = (u32)-1;
/*
* We have set this up to try and minimize the number of
* re-configures that take place. As a result of this when
* certain AIF's come in we will set a flag waiting for another
* type of AIF before setting the re-config flag.
*/
switch (le32_to_cpu(aifcmd->command)) {
case AifCmdDriverNotify:
switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
/*
* Morph or Expand complete
*/
case AifDenMorphComplete:
case AifDenVolumeExtendComplete:
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
/*
* Find the scsi_device associated with the SCSI
* address. Make sure we have the right array, and if
* so set the flag to initiate a new re-config once we
* see an AifEnConfigChange AIF come through.
*/
if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
device = scsi_device_lookup(dev->scsi_host_ptr,
CONTAINER_TO_CHANNEL(container),
CONTAINER_TO_ID(container),
CONTAINER_TO_LUN(container));
if (device) {
dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
scsi_device_put(device);
}
}
}
/*
* If we are waiting on something and this happens to be
* that thing then set the re-configure flag.
*/
if (container != (u32)-1) {
if (container >= dev->maximum_num_containers)
break;
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
}
break;
case AifCmdEventNotify:
switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
case AifEnBatteryEvent:
dev->cache_protected =
(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
break;
/*
* Add an Array.
*/
case AifEnAddContainer:
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
dev->fsa_dev[container].config_needed = ADD;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break;
/*
* Delete an Array.
*/
case AifEnDeleteContainer:
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
dev->fsa_dev[container].config_needed = DELETE;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break;
/*
* Container change detected. If we currently are not
* waiting on something else, setup to wait on a Config Change.
*/
case AifEnContainerChange:
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
break;
dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
dev->fsa_dev[container].config_waiting_stamp = jiffies;
break;
case AifEnConfigChange:
break;
case AifEnAddJBOD:
case AifEnDeleteJBOD:
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if ((container >> 28)) {
container = (u32)-1;
break;
}
channel = (container >> 24) & 0xF;
if (channel >= dev->maximum_num_channels) {
container = (u32)-1;
break;
}
id = container & 0xFFFF;
if (id >= dev->maximum_num_physicals) {
container = (u32)-1;
break;
}
lun = (container >> 16) & 0xFF;
container = (u32)-1;
channel = aac_phys_to_logical(channel);
device_config_needed =
(((__le32 *)aifcmd->data)[0] ==
cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
if (device_config_needed == ADD) {
device = scsi_device_lookup(dev->scsi_host_ptr,
channel,
id,
lun);
if (device) {
scsi_remove_device(device);
scsi_device_put(device);
}
}
break;
case AifEnEnclosureManagement:
/*
* If in JBOD mode, automatic exposure of new
* physical targets is suppressed until configured.
*/
if (dev->jbod)
break;
switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
case EM_DRIVE_INSERTION:
case EM_DRIVE_REMOVAL:
container = le32_to_cpu(
((__le32 *)aifcmd->data)[2]);
if ((container >> 28)) {
container = (u32)-1;
break;
}
channel = (container >> 24) & 0xF;
if (channel >= dev->maximum_num_channels) {
container = (u32)-1;
break;
}
id = container & 0xFFFF;
lun = (container >> 16) & 0xFF;
container = (u32)-1;
if (id >= dev->maximum_num_physicals) {
/* legacy dev_t ? */
if ((0x2000 <= id) || lun || channel ||
((channel = (id >> 7) & 0x3F) >=
dev->maximum_num_channels))
break;
lun = (id >> 4) & 7;
id &= 0xF;
}
channel = aac_phys_to_logical(channel);
device_config_needed =
(((__le32 *)aifcmd->data)[3]
== cpu_to_le32(EM_DRIVE_INSERTION)) ?
ADD : DELETE;
break;
}
break;
}
/*
* If we are waiting on something and this happens to be
* that thing then set the re-configure flag.
*/
if (container != (u32)-1) {
if (container >= dev->maximum_num_containers)
break;
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if ((dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
}
break;
case AifCmdJobProgress:
/*
* These are job progress AIF's. When a Clear is being
* done on a container it is initially created then hidden from
* the OS. When the clear completes we don't get a config
* change so we monitor the job status complete on a clear then
* wait for a container change.
*/
if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
(((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
for (container = 0;
container < dev->maximum_num_containers;
++container) {
/*
* Stomp on all config sequencing for all
* containers?
*/
dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange;
dev->fsa_dev[container].config_needed = ADD;
dev->fsa_dev[container].config_waiting_stamp =
jiffies;
}
}
if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
((__le32 *)aifcmd->data)[6] == 0 &&
((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
for (container = 0;
container < dev->maximum_num_containers;
++container) {
/*
* Stomp on all config sequencing for all
* containers?
*/
dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange;
dev->fsa_dev[container].config_needed = DELETE;
dev->fsa_dev[container].config_waiting_stamp =
jiffies;
}
}
break;
}
container = 0;
retry_next:
if (device_config_needed == NOTHING)
for (; container < dev->maximum_num_containers; ++container) {
if ((dev->fsa_dev[container].config_waiting_on == 0) &&
(dev->fsa_dev[container].config_needed != NOTHING) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
device_config_needed =
dev->fsa_dev[container].config_needed;
dev->fsa_dev[container].config_needed = NOTHING;
channel = CONTAINER_TO_CHANNEL(container);
id = CONTAINER_TO_ID(container);
lun = CONTAINER_TO_LUN(container);
break;
}
}
if (device_config_needed == NOTHING)
return;
/*
* If we decided that a re-configuration needs to be done,
* schedule it here on the way out the door, please close the door
* behind you.
*/
/*
* Find the scsi_device associated with the SCSI address,
* and mark it as changed, invalidating the cache. This deals
* with changes to existing device IDs.
*/
if (!dev || !dev->scsi_host_ptr)
return;
/*
* force reload of disk info via aac_probe_container
*/
if ((channel == CONTAINER_CHANNEL) &&
(device_config_needed != NOTHING)) {
if (dev->fsa_dev[container].valid == 1)
dev->fsa_dev[container].valid = 2;
aac_probe_container(dev, container);
}
device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
if (device) {
switch (device_config_needed) {
case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
scsi_remove_device(device);
#else
if (scsi_device_online(device)) {
scsi_device_set_state(device, SDEV_OFFLINE);
sdev_printk(KERN_INFO, device,
"Device offlined - %s\n",
(channel == CONTAINER_CHANNEL) ?
"array deleted" :
"enclosure services event");
}
#endif
break;
case ADD:
if (!scsi_device_online(device)) {
sdev_printk(KERN_INFO, device,
"Device online - %s\n",
(channel == CONTAINER_CHANNEL) ?
"array created" :
"enclosure services event");
scsi_device_set_state(device, SDEV_RUNNING);
}
/* FALLTHRU */
case CHANGE:
if ((channel == CONTAINER_CHANNEL)
&& (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
scsi_remove_device(device);
#else
if (!scsi_device_online(device))
break;
scsi_device_set_state(device, SDEV_OFFLINE);
sdev_printk(KERN_INFO, device,
"Device offlined - %s\n",
"array failed");
#endif
break;
}
scsi_rescan_device(&device->sdev_gendev);
default:
break;
}
scsi_device_put(device);
device_config_needed = NOTHING;
}
if (device_config_needed == ADD)
scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
if (channel == CONTAINER_CHANNEL) {
container++;
device_config_needed = NOTHING;
goto retry_next;
}
}
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
int index, quirks;
int retval;
struct Scsi_Host *host;
struct scsi_device *dev;
struct scsi_cmnd *command;
struct scsi_cmnd *command_list;
int jafo = 0;
/*
* Assumptions:
* - host is locked, unless called by the aacraid thread.
* (a matter of convenience, due to legacy issues surrounding
* eh_host_adapter_reset).
* - in_reset is asserted, so no new i/o is getting to the
* card.
* - The card is dead, or will be very shortly ;-/ so no new
* commands are completing in the interrupt service.
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
aac_adapter_disable_int(aac);
if (aac->thread->pid != current->pid) {
spin_unlock_irq(host->host_lock);
kthread_stop(aac->thread);
jafo = 1;
}
/*
* A positive health value means the adapter is in a known DEAD PANIC
* state and could be reset to `try again'.
*/
retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
if (retval)
goto out;
/*
* Loop through the fibs, close the synchronous FIBS
*/
for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
struct fib *fib = &aac->fibs[index];
if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
(fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
up(&fib->event_wait);
spin_unlock_irqrestore(&fib->event_lock, flagv);
schedule();
retval = 0;
}
}
/* Give some extra time for ioctls to complete. */
if (retval == 0)
ssleep(2);
index = aac->cardtype;
/*
* Re-initialize the adapter, first free resources, then carefully
* apply the initialization sequence to come back again. Only risk
* is a change in Firmware dropping cache, it is assumed the caller
* will ensure that i/o is quiesced and the card is flushed in that
* case.
*/
aac_fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
aac->comm_addr = NULL;
aac->comm_phys = 0;
kfree(aac->queues);
aac->queues = NULL;
free_irq(aac->pdev->irq, aac);
if (aac->msi)
pci_disable_msi(aac->pdev);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
quirks = aac_get_driver_ident(index)->quirks;
if (quirks & AAC_QUIRK_31BIT) {
if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
goto out;
} else {
if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
goto out;
}
if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
goto out;
if (quirks & AAC_QUIRK_31BIT)
if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
goto out;
if (jafo) {
aac->thread = kthread_run(aac_command_thread, aac, "%s",
aac->name);
if (IS_ERR(aac->thread)) {
retval = PTR_ERR(aac->thread);
goto out;
}
}
(void)aac_get_adapter_info(aac);
if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
host->sg_tablesize = 34;
host->max_sectors = (host->sg_tablesize * 8) + 112;
}
if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
host->sg_tablesize = 17;
host->max_sectors = (host->sg_tablesize * 8) + 112;
}
aac_get_config_status(aac, 1);
aac_get_containers(aac);
/*
* This is where the assumption that the Adapter is quiesced
* is important.
*/
command_list = NULL;
__shost_for_each_device(dev, host) {
unsigned long flags;
spin_lock_irqsave(&dev->list_lock, flags);
list_for_each_entry(command, &dev->cmd_list, list)
if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
command->SCp.buffer = (struct scatterlist *)command_list;
command_list = command;
}
spin_unlock_irqrestore(&dev->list_lock, flags);
}
while ((command = command_list)) {
command_list = (struct scsi_cmnd *)command->SCp.buffer;
command->SCp.buffer = NULL;
command->result = DID_OK << 16
| COMMAND_COMPLETE << 8
| SAM_STAT_TASK_SET_FULL;
command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
command->scsi_done(command);
}
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
if (jafo) {
spin_lock_irq(host->host_lock);
}
return retval;
}
int aac_reset_adapter(struct aac_dev * aac, int forced)
{
unsigned long flagv = 0;
int retval;
struct Scsi_Host * host;
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
return -EBUSY;
if (aac->in_reset) {
spin_unlock_irqrestore(&aac->fib_lock, flagv);
return -EBUSY;
}
aac->in_reset = 1;
spin_unlock_irqrestore(&aac->fib_lock, flagv);
/*
* Wait for all commands to complete to this specific
* target (block maximum 60 seconds). Although not necessary,
* it does make us a good storage citizen.
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
if (forced < 2) for (retval = 60; retval; --retval) {
struct scsi_device * dev;
struct scsi_cmnd * command;
int active = 0;
__shost_for_each_device(dev, host) {
spin_lock_irqsave(&dev->list_lock, flagv);
list_for_each_entry(command, &dev->cmd_list, list) {
if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
active++;
break;
}
}
spin_unlock_irqrestore(&dev->list_lock, flagv);
if (active)
break;
}
/*
* We can exit if all the commands are complete
*/
if (active == 0)
break;
ssleep(1);
}
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
aac_send_shutdown(aac);
spin_lock_irqsave(host->host_lock, flagv);
retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
spin_unlock_irqrestore(host->host_lock, flagv);
if ((forced < 2) && (retval == -ENODEV)) {
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
struct fib * fibctx = aac_fib_alloc(aac);
if (fibctx) {
struct aac_pause *cmd;
int status;
aac_fib_init(fibctx);
cmd = (struct aac_pause *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_ContainerConfig);
cmd->type = cpu_to_le32(CT_PAUSE_IO);
cmd->timeout = cpu_to_le32(1);
cmd->min = cpu_to_le32(1);
cmd->noRescan = cpu_to_le32(1);
cmd->count = cpu_to_le32(0);
status = aac_fib_send(ContainerCommand,
fibctx,
sizeof(struct aac_pause),
FsaNormal,
-2 /* Timeout silently */, 1,
NULL, NULL);
if (status >= 0)
aac_fib_complete(fibctx);
/* FIB should be freed only after getting
* the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
}
}
return retval;
}
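/**
 * aac_check_health	-	check the adapter's firmware health indicator
 * @aac:	adapter to check
 *
 * Rough summary of the flow below: return 0 when the adapter is healthy or
 * a reset is already in progress. On a firmware panic (non-zero BlinkLED),
 * fake up an AifExeFirmwarePanic event for every registered fib context and
 * wake its waiting reader; a negative code means the adapter is dead and no
 * reset is attempted, otherwise the adapter is reset unless aac_check_reset
 * (or the adapter's AAC_OPTION_IGNORE_RESET) forbids it. The BlinkLED code
 * is returned to the caller.
 */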
int aac_check_health(struct aac_dev * aac)
{
int BlinkLED;
unsigned long time_now, flagv = 0;
struct list_head * entry;
struct Scsi_Host * host;
/* Extending the scope of fib_lock slightly to protect aac->in_reset */
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
return 0;
if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
spin_unlock_irqrestore(&aac->fib_lock, flagv);
return 0; /* OK */
}
aac->in_reset = 1;
/* Fake up an AIF:
* aac_aifcmd.command = AifCmdEventNotify = 1
* aac_aifcmd.seqnum = 0xFFFFFFFF
* aac_aifcmd.data[0] = AifEnExpEvent = 23
* aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
* aac_aifcmd.data[2] = AifHighPriority = 3
* aac_aifcmd.data[3] = BlinkLED
*/
time_now = jiffies/HZ;
entry = aac->fib_list.next;
/*
* For each Context that is on the
* fibctxList, make a copy of the
* fib, and then set the event to wake up the
* thread that is waiting for it.
*/
while (entry != &aac->fib_list) {
/*
* Extract the fibctx
*/
struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
struct hw_fib * hw_fib;
struct fib * fib;
/*
* Check if the queue is getting
* backlogged
*/
if (fibctx->count > 20) {
/*
* It's *not* jiffies folks,
* but jiffies / HZ, so do not
* panic ...
*/
u32 time_last = fibctx->jiffies;
/*
* Has it been > 2 minutes
* since the last read off
* the queue?
*/
if ((time_now - time_last) > aif_timeout) {
entry = entry->next;
aac_close_fib_context(aac, fibctx);
continue;
}
}
/*
* Warning: no sleep allowed while
* holding spinlock
*/
hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
if (fib && hw_fib) {
struct aac_aifcmd * aif;
fib->hw_fib_va = hw_fib;
fib->dev = aac;
aac_fib_init(fib);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof (struct fib);
fib->data = hw_fib->data;
aif = (struct aac_aifcmd *)hw_fib->data;
aif->command = cpu_to_le32(AifCmdEventNotify);
aif->seqnum = cpu_to_le32(0xFFFFFFFF);
((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
/*
* Put the FIB onto the
* fibctx's fibs
*/
list_add_tail(&fib->fiblink, &fibctx->fib_list);
fibctx->count++;
/*
* Set the event to wake up the
* thread that is waiting.
*/
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
kfree(fib);
kfree(hw_fib);
}
entry = entry->next;
}
spin_unlock_irqrestore(&aac->fib_lock, flagv);
if (BlinkLED < 0) {
printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
goto out;
}
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
if (!aac_check_reset || ((aac_check_reset == 1) &&
(aac->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_IGNORE_RESET)))
goto out;
host = aac->scsi_host_ptr;
if (aac->thread->pid != current->pid)
spin_lock_irqsave(host->host_lock, flagv);
BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
if (aac->thread->pid != current->pid)
spin_unlock_irqrestore(host->host_lock, flagv);
return BlinkLED;
out:
aac->in_reset = 0;
return BlinkLED;
}
/**
* aac_command_thread - command processing thread
* @data: Adapter to monitor
*
* Waits on the commandready event in its queue. When the event gets set
* it will pull FIBs off its queue. It will continue to pull FIBs off
* until the queue is empty. When the queue is empty it will wait for
* more FIBs.
*/
int aac_command_thread(void *data)
{
struct aac_dev *dev = data;
struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib;
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
unsigned long next_jiffies = jiffies + HZ;
unsigned long next_check_jiffies = next_jiffies;
long difference = HZ;
/*
* We can only have one thread per adapter for AIF's.
*/
if (dev->aif_thread)
return -EINVAL;
/*
* Let the DPC know it has a place to send the AIF's to.
*/
dev->aif_thread = 1;
add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
dprintk ((KERN_INFO "aac_command_thread start\n"));
while (1) {
spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct fib, fiblink);
/*
* We will process the FIB here or pass it to a
* worker thread that is TBD. We really can't
* do anything at this point since we don't have
* anything defined for this thread to do.
*/
hw_fib = fib->hw_fib_va;
memset(fib, 0, sizeof(struct fib));
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
/*
* We only handle AifRequest fibs from the adapter.
*/
aifcmd = (struct aac_aifcmd *) hw_fib->data;
if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
/* Handle Driver Notify Events */
aac_handle_aif(dev, fib);
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
aac_fib_adapter_complete(fib, (u16)sizeof(u32));
} else {
/* The u32 here is important and intended. We are using
32bit wrapping time to fit the adapter field */
u32 time_now, time_last;
unsigned long flagv;
unsigned num;
struct hw_fib ** hw_fib_pool, ** hw_fib_p;
struct fib ** fib_pool, ** fib_p;
/* Sniff events */
if ((aifcmd->command ==
cpu_to_le32(AifCmdEventNotify)) ||
(aifcmd->command ==
cpu_to_le32(AifCmdJobProgress))) {
aac_handle_aif(dev, fib);
}
time_now = jiffies/HZ;
/*
* Warning: no sleep allowed while
* holding spinlock. We take the estimate
* and pre-allocate a set of fibs outside the
* lock.
*/
num = le32_to_cpu(dev->init->AdapterFibsSize)
/ sizeof(struct hw_fib); /* some extra */
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
while (entry != &dev->fib_list) {
entry = entry->next;
++num;
}
spin_unlock_irqrestore(&dev->fib_lock, flagv);
hw_fib_pool = NULL;
fib_pool = NULL;
if (num
&& ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
&& ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (hw_fib_p < &hw_fib_pool[num]) {
if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
--hw_fib_p;
break;
}
if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
kfree(*(--hw_fib_p));
break;
}
}
if ((num = hw_fib_p - hw_fib_pool) == 0) {
kfree(fib_pool);
fib_pool = NULL;
kfree(hw_fib_pool);
hw_fib_pool = NULL;
}
} else {
kfree(hw_fib_pool);
hw_fib_pool = NULL;
}
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
/*
* For each Context that is on the
* fibctxList, make a copy of the
* fib, and then set the event to wake up the
* thread that is waiting for it.
*/
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (entry != &dev->fib_list) {
/*
* Extract the fibctx
*/
fibctx = list_entry(entry, struct aac_fib_context, next);
/*
* Check if the queue is getting
* backlogged
*/
if (fibctx->count > 20)
{
/*
* It's *not* jiffies folks,
* but jiffies / HZ so do not
* panic ...
*/
time_last = fibctx->jiffies;
/*
* Has it been > 2 minutes
* since the last read off
* the queue?
*/
if ((time_now - time_last) > aif_timeout) {
entry = entry->next;
aac_close_fib_context(dev, fibctx);
continue;
}
}
/*
* Warning: no sleep allowed while
* holding spinlock
*/
if (hw_fib_p < &hw_fib_pool[num]) {
hw_newfib = *hw_fib_p;
*(hw_fib_p++) = NULL;
newfib = *fib_p;
*(fib_p++) = NULL;
/*
* Make the copy of the FIB
*/
memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
memcpy(newfib, fib, sizeof(struct fib));
newfib->hw_fib_va = hw_newfib;
/*
* Put the FIB onto the
* fibctx's fibs
*/
list_add_tail(&newfib->fiblink, &fibctx->fib_list);
fibctx->count++;
/*
* Set the event to wake up the
* thread that is waiting.
*/
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
}
entry = entry->next;
}
/*
* Set the status of this FIB
*/
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
aac_fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
/* Free up the remaining resources */
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (hw_fib_p < &hw_fib_pool[num]) {
kfree(*hw_fib_p);
kfree(*fib_p);
++fib_p;
++hw_fib_p;
}
kfree(hw_fib_pool);
kfree(fib_pool);
}
kfree(fib);
spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
}
/*
* There are no more AIF's
*/
spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
/*
* Background activity
*/
if ((time_before(next_check_jiffies,next_jiffies))
&& ((difference = next_check_jiffies - jiffies) <= 0)) {
next_check_jiffies = next_jiffies;
if (aac_check_health(dev) == 0) {
difference = ((long)(unsigned)check_interval)
* HZ;
next_check_jiffies = jiffies + difference;
} else if (!dev->queues)
break;
}
if (!time_before(next_check_jiffies,next_jiffies)
&& ((difference = next_jiffies - jiffies) <= 0)) {
struct timeval now;
int ret;
/* Don't even try to talk to the adapter if it's sick */
ret = aac_check_health(dev);
if (!ret && !dev->queues)
break;
next_check_jiffies = jiffies
+ ((long)(unsigned)check_interval)
* HZ;
do_gettimeofday(&now);
/* Synchronize our watches */
if (((1000000 - (1000000 / HZ)) > now.tv_usec)
&& (now.tv_usec > (1000000 / HZ)))
difference = (((1000000 - now.tv_usec) * HZ)
+ 500000) / 1000000;
else if (ret == 0) {
struct fib *fibptr;
if ((fibptr = aac_fib_alloc(dev))) {
int status;
__le32 *info;
aac_fib_init(fibptr);
info = (__le32 *) fib_data(fibptr);
if (now.tv_usec > 500000)
++now.tv_sec;
*info = cpu_to_le32(now.tv_sec);
status = aac_fib_send(SendHostTime,
fibptr,
sizeof(*info),
FsaNormal,
1, 1,
NULL,
NULL);
/* Do not set XferState to zero unless
* receives a response from F/W */
if (status >= 0)
aac_fib_complete(fibptr);
/* FIB should be freed only after
* getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibptr);
}
difference = (long)(unsigned)update_interval*HZ;
} else {
/* retry shortly */
difference = 10 * HZ;
}
next_jiffies = jiffies + difference;
if (time_before(next_check_jiffies,next_jiffies))
difference = next_check_jiffies - jiffies;
}
if (difference <= 0)
difference = 1;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(difference);
if (kthread_should_stop())
break;
}
if (dev->queues)
remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
dev->aif_thread = 0;
return 0;
}
| gpl-2.0 |
mifl/android_kernel_pantech_msm8974 | arch/arm/mach-msm/board-8930-pmic.c | 1609 | 18932 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/interrupt.h>
#include <linux/mfd/pm8xxx/pm8038.h>
#include <linux/mfd/pm8xxx/pm8xxx-adc.h>
#include <linux/msm_ssbi.h>
#include <asm/mach-types.h>
#include <mach/msm_bus_board.h>
#include <mach/restart.h>
#include <mach/socinfo.h>
#include "devices.h"
#include "board-8930.h"
struct pm8xxx_gpio_init {
unsigned gpio;
struct pm_gpio config;
};
struct pm8xxx_mpp_init {
unsigned mpp;
struct pm8xxx_mpp_config_data config;
};
#define PM8038_GPIO_INIT(_gpio, _dir, _buf, _val, _pull, _vin, _out_strength, \
_func, _inv, _disable) \
{ \
.gpio = PM8038_GPIO_PM_TO_SYS(_gpio), \
.config = { \
.direction = _dir, \
.output_buffer = _buf, \
.output_value = _val, \
.pull = _pull, \
.vin_sel = _vin, \
.out_strength = _out_strength, \
.function = _func, \
.inv_int_pol = _inv, \
.disable_pin = _disable, \
} \
}
#define PM8038_MPP_INIT(_mpp, _type, _level, _control) \
{ \
.mpp = PM8038_MPP_PM_TO_SYS(_mpp), \
.config = { \
.type = PM8XXX_MPP_TYPE_##_type, \
.level = _level, \
.control = PM8XXX_MPP_##_control, \
} \
}
#define PM8038_GPIO_DISABLE(_gpio) \
PM8038_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, 0, 0, 0, PM8038_GPIO_VIN_L11, \
0, 0, 0, 1)
#define PM8038_GPIO_OUTPUT(_gpio, _val) \
PM8038_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, PM8038_GPIO_VIN_L11, \
PM_GPIO_STRENGTH_HIGH, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8038_GPIO_INPUT(_gpio, _pull) \
PM8038_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0, \
_pull, PM8038_GPIO_VIN_L11, \
PM_GPIO_STRENGTH_NO, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8038_GPIO_OUTPUT_FUNC(_gpio, _val, _func) \
PM8038_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, PM8038_GPIO_VIN_L11, \
PM_GPIO_STRENGTH_HIGH, \
_func, 0, 0)
#define PM8038_GPIO_OUTPUT_VIN(_gpio, _val, _vin) \
PM8038_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, _vin, \
PM_GPIO_STRENGTH_HIGH, \
PM_GPIO_FUNC_NORMAL, 0, 0)
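/*
 * Illustrative expansion (example values only): PM8038_GPIO_OUTPUT(6, 1)
 * produces an entry with .gpio = PM8038_GPIO_PM_TO_SYS(6) and a config of
 * { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_CMOS,
 *   .output_value = 1, .pull = PM_GPIO_PULL_NO,
 *   .vin_sel = PM8038_GPIO_VIN_L11, .out_strength = PM_GPIO_STRENGTH_HIGH,
 *   .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 0, .disable_pin = 0 },
 * i.e. a CMOS push-pull output driven high, supplied from the L11 rail.
 */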
#define PM8917_GPIO_INIT(_gpio, _dir, _buf, _val, _pull, _vin, _out_strength, \
_func, _inv, _disable) \
{ \
.gpio = PM8917_GPIO_PM_TO_SYS(_gpio), \
.config = { \
.direction = _dir, \
.output_buffer = _buf, \
.output_value = _val, \
.pull = _pull, \
.vin_sel = _vin, \
.out_strength = _out_strength, \
.function = _func, \
.inv_int_pol = _inv, \
.disable_pin = _disable, \
} \
}
#define PM8917_MPP_INIT(_mpp, _type, _level, _control) \
{ \
.mpp = PM8917_MPP_PM_TO_SYS(_mpp), \
.config = { \
.type = PM8XXX_MPP_TYPE_##_type, \
.level = _level, \
.control = PM8XXX_MPP_##_control, \
} \
}
#define PM8917_GPIO_DISABLE(_gpio) \
PM8917_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, 0, 0, 0, PM_GPIO_VIN_S4, \
0, 0, 0, 1)
#define PM8917_GPIO_OUTPUT(_gpio, _val) \
PM8917_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, PM_GPIO_VIN_S4, \
PM_GPIO_STRENGTH_HIGH, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8917_GPIO_INPUT(_gpio, _pull) \
PM8917_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0, \
_pull, PM_GPIO_VIN_S4, \
PM_GPIO_STRENGTH_NO, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8917_GPIO_OUTPUT_FUNC(_gpio, _val, _func) \
PM8917_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, PM_GPIO_VIN_S4, \
PM_GPIO_STRENGTH_HIGH, \
_func, 0, 0)
#define PM8917_GPIO_OUTPUT_VIN(_gpio, _val, _vin) \
PM8917_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, _vin, \
PM_GPIO_STRENGTH_HIGH, \
PM_GPIO_FUNC_NORMAL, 0, 0)
/* GPIO and MPP configurations for MSM8930 + PM8038 targets */
/* Initial PM8038 GPIO configurations */
static struct pm8xxx_gpio_init pm8038_gpios[] __initdata = {
/* keys GPIOs */
PM8038_GPIO_INPUT(3, PM_GPIO_PULL_UP_30),
PM8038_GPIO_INPUT(8, PM_GPIO_PULL_UP_30),
PM8038_GPIO_INPUT(10, PM_GPIO_PULL_UP_30),
PM8038_GPIO_INPUT(11, PM_GPIO_PULL_UP_30),
/* haptics gpio */
PM8038_GPIO_OUTPUT_FUNC(7, 0, PM_GPIO_FUNC_1),
/* MHL PWR EN */
PM8038_GPIO_OUTPUT_VIN(5, 1, PM8038_GPIO_VIN_VPH),
};
/* Initial PM8038 MPP configurations */
static struct pm8xxx_mpp_init pm8038_mpps[] __initdata = {
};
/* GPIO and MPP configurations for MSM8930 + PM8917 targets */
/* Initial PM8917 GPIO configurations */
static struct pm8xxx_gpio_init pm8917_gpios[] __initdata = {
/* Backlight enable control */
PM8917_GPIO_OUTPUT(24, 1),
/* keys GPIOs */
PM8917_GPIO_INPUT(27, PM_GPIO_PULL_UP_30),
PM8917_GPIO_INPUT(28, PM_GPIO_PULL_UP_30),
PM8917_GPIO_INPUT(36, PM_GPIO_PULL_UP_30),
PM8917_GPIO_INPUT(37, PM_GPIO_PULL_UP_30),
/* haptics gpio */
PM8917_GPIO_OUTPUT_FUNC(38, 0, PM_GPIO_FUNC_2),
/* MHL PWR EN */
PM8917_GPIO_OUTPUT_VIN(25, 1, PM_GPIO_VIN_VPH),
};
/* Initial PM8917 MPP configurations */
static struct pm8xxx_mpp_init pm8917_mpps[] __initdata = {
PM8917_MPP_INIT(PM8XXX_AMUX_MPP_3, A_INPUT,
PM8XXX_MPP_AIN_AMUX_CH8, DIN_TO_INT),
/* Configure MPP01 for USB ID detection */
PM8917_MPP_INIT(1, D_INPUT, PM8921_MPP_DIG_LEVEL_S4, DIN_TO_INT),
};
void __init msm8930_pm8038_gpio_mpp_init(void)
{
int i, rc;
for (i = 0; i < ARRAY_SIZE(pm8038_gpios); i++) {
rc = pm8xxx_gpio_config(pm8038_gpios[i].gpio,
&pm8038_gpios[i].config);
if (rc) {
pr_err("%s: pm8xxx_gpio_config: rc=%d\n", __func__, rc);
break;
}
}
/* Initial MPP configuration. */
for (i = 0; i < ARRAY_SIZE(pm8038_mpps); i++) {
rc = pm8xxx_mpp_config(pm8038_mpps[i].mpp,
&pm8038_mpps[i].config);
if (rc) {
pr_err("%s: pm8xxx_mpp_config: rc=%d\n", __func__, rc);
break;
}
}
}
void __init msm8930_pm8917_gpio_mpp_init(void)
{
int i, rc;
for (i = 0; i < ARRAY_SIZE(pm8917_gpios); i++) {
rc = pm8xxx_gpio_config(pm8917_gpios[i].gpio,
&pm8917_gpios[i].config);
if (rc) {
pr_err("%s: pm8xxx_gpio_config: rc=%d\n", __func__, rc);
break;
}
}
/* Initial MPP configuration. */
for (i = 0; i < ARRAY_SIZE(pm8917_mpps); i++) {
rc = pm8xxx_mpp_config(pm8917_mpps[i].mpp,
&pm8917_mpps[i].config);
if (rc) {
pr_err("%s: pm8xxx_mpp_config: rc=%d\n", __func__, rc);
break;
}
}
}
static struct pm8xxx_adc_amux pm8038_adc_channels_data[] = {
{"vcoin", CHANNEL_VCOIN, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"vbat", CHANNEL_VBAT, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"dcin", CHANNEL_DCIN, CHAN_PATH_SCALING4, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"ichg", CHANNEL_ICHG, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"vph_pwr", CHANNEL_VPH_PWR, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"ibat", CHANNEL_IBAT, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"batt_therm", CHANNEL_BATT_THERM, CHAN_PATH_SCALING1, AMUX_RSV2,
ADC_DECIMATION_TYPE2, ADC_SCALE_BATT_THERM},
{"batt_id", CHANNEL_BATT_ID, CHAN_PATH_SCALING1, AMUX_RSV2,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"usbin", CHANNEL_USBIN, CHAN_PATH_SCALING3, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"pmic_therm", CHANNEL_DIE_TEMP, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PMIC_THERM},
{"625mv", CHANNEL_625MV, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"125v", CHANNEL_125V, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"chg_temp", CHANNEL_CHG_TEMP, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"pa_therm1", ADC_MPP_1_AMUX4, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PA_THERM},
{"xo_therm", CHANNEL_MUXOFF, CHAN_PATH_SCALING1, AMUX_RSV0,
ADC_DECIMATION_TYPE2, ADC_SCALE_XOTHERM},
{"pa_therm0", ADC_MPP_1_AMUX3, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PA_THERM},
};
static struct pm8xxx_adc_properties pm8038_adc_data = {
.adc_vdd_reference = 1800, /* milli-voltage for this adc */
.bitresolution = 15,
.bipolar = 0,
};
static struct pm8xxx_adc_platform_data pm8038_adc_pdata = {
.adc_channel = pm8038_adc_channels_data,
.adc_num_board_channel = ARRAY_SIZE(pm8038_adc_channels_data),
.adc_prop = &pm8038_adc_data,
.adc_mpp_base = PM8038_MPP_PM_TO_SYS(1),
};
static struct pm8xxx_irq_platform_data pm8xxx_irq_pdata __devinitdata = {
.irq_base = PM8038_IRQ_BASE,
.devirq = MSM_GPIO_TO_INT(104),
.irq_trigger_flag = IRQF_TRIGGER_LOW,
};
static struct pm8xxx_gpio_platform_data pm8xxx_gpio_pdata __devinitdata = {
.gpio_base = PM8038_GPIO_PM_TO_SYS(1),
};
static struct pm8xxx_mpp_platform_data pm8xxx_mpp_pdata __devinitdata = {
.mpp_base = PM8038_MPP_PM_TO_SYS(1),
};
static struct pm8xxx_rtc_platform_data pm8xxx_rtc_pdata __devinitdata = {
.rtc_write_enable = false,
.rtc_alarm_powerup = false,
};
static struct pm8xxx_pwrkey_platform_data pm8xxx_pwrkey_pdata = {
.pull_up = 1,
.kpd_trigger_delay_us = 15625,
.wakeup = 1,
};
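/*
 * Charge-current steps applied at increasing thermal mitigation levels;
 * the values appear to be in mA, the first matching max_bat_chg_current
 * in the charger platform data below.
 */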
static int pm8921_therm_mitigation[] = {
1100,
700,
600,
325,
};
#define MAX_VOLTAGE_MV 4200
#define CHG_TERM_MA 100
static struct pm8921_charger_platform_data pm8921_chg_pdata __devinitdata = {
.update_time = 60000,
.max_voltage = MAX_VOLTAGE_MV,
.min_voltage = 3200,
.uvd_thresh_voltage = 4050,
.alarm_low_mv = 3400,
.alarm_high_mv = 4000,
.resume_voltage_delta = 60,
.resume_charge_percent = 99,
.term_current = CHG_TERM_MA,
.cool_temp = 10,
.warm_temp = 45,
.temp_check_period = 1,
.max_bat_chg_current = 1100,
.cool_bat_chg_current = 350,
.warm_bat_chg_current = 350,
.cool_bat_voltage = 4100,
.warm_bat_voltage = 4100,
.thermal_mitigation = pm8921_therm_mitigation,
.thermal_levels = ARRAY_SIZE(pm8921_therm_mitigation),
.led_src_config = LED_SRC_VPH_PWR,
.rconn_mohm = 18,
};
#define PM8038_WLED_MAX_CURRENT 25
#define PM8XXX_LED_PWM_PERIOD 1000
#define PM8XXX_LED_PWM_DUTY_MS 20
#define PM8038_RGB_LED_MAX_CURRENT 12
static struct led_info pm8038_led_info[] = {
[0] = {
.name = "wled",
.default_trigger = "bkl_trigger",
},
[1] = {
.name = "led:rgb_red",
.default_trigger = "battery-charging",
},
[2] = {
.name = "led:rgb_green",
},
[3] = {
.name = "led:rgb_blue",
},
};
static struct led_platform_data pm8038_led_core_pdata = {
.num_leds = ARRAY_SIZE(pm8038_led_info),
.leds = pm8038_led_info,
};
static struct wled_config_data wled_cfg = {
.dig_mod_gen_en = true,
.cs_out_en = true,
.ctrl_delay_us = 0,
.op_fdbck = true,
.ovp_val = WLED_OVP_32V,
.boost_curr_lim = WLED_CURR_LIMIT_525mA,
.strings = WLED_SECOND_STRING,
};
static int pm8038_led0_pwm_duty_pcts[56] = {
1, 4, 8, 12, 16, 20, 24, 28, 32, 36,
40, 44, 46, 52, 56, 60, 64, 68, 72, 76,
80, 84, 88, 92, 96, 100, 100, 100, 98, 95,
92, 88, 84, 82, 78, 74, 70, 66, 62, 58,
58, 54, 50, 48, 42, 38, 34, 30, 26, 22,
14, 10, 6, 4, 1
};
/*
* Note: There is a bug in the LPG module that results in incorrect
* pattern behavior when LUT index 0 is used, so effectively
* there are 63 usable LUT entries.
*/
static struct pm8xxx_pwm_duty_cycles pm8038_led0_pwm_duty_cycles = {
.duty_pcts = (int *)&pm8038_led0_pwm_duty_pcts,
.num_duty_pcts = ARRAY_SIZE(pm8038_led0_pwm_duty_pcts),
.duty_ms = PM8XXX_LED_PWM_DUTY_MS,
.start_idx = 1,
};
static struct pm8xxx_led_config pm8038_led_configs[] = {
[0] = {
.id = PM8XXX_ID_WLED,
.mode = PM8XXX_LED_MODE_MANUAL,
.max_current = PM8038_WLED_MAX_CURRENT,
.default_state = 0,
.wled_cfg = &wled_cfg,
},
[1] = {
.id = PM8XXX_ID_RGB_LED_RED,
.mode = PM8XXX_LED_MODE_PWM1,
.max_current = PM8038_RGB_LED_MAX_CURRENT,
.pwm_channel = 5,
.pwm_period_us = PM8XXX_LED_PWM_PERIOD,
.pwm_duty_cycles = &pm8038_led0_pwm_duty_cycles,
},
[2] = {
.id = PM8XXX_ID_RGB_LED_GREEN,
.mode = PM8XXX_LED_MODE_PWM1,
.max_current = PM8038_RGB_LED_MAX_CURRENT,
.pwm_channel = 4,
.pwm_period_us = PM8XXX_LED_PWM_PERIOD,
.pwm_duty_cycles = &pm8038_led0_pwm_duty_cycles,
},
[3] = {
.id = PM8XXX_ID_RGB_LED_BLUE,
.mode = PM8XXX_LED_MODE_PWM1,
.max_current = PM8038_RGB_LED_MAX_CURRENT,
.pwm_channel = 3,
.pwm_period_us = PM8XXX_LED_PWM_PERIOD,
.pwm_duty_cycles = &pm8038_led0_pwm_duty_cycles,
},
};
static struct pm8xxx_led_platform_data pm8xxx_leds_pdata = {
.led_core = &pm8038_led_core_pdata,
.configs = pm8038_led_configs,
.num_configs = ARRAY_SIZE(pm8038_led_configs),
};
static struct pm8xxx_ccadc_platform_data pm8xxx_ccadc_pdata = {
.r_sense_uohm = 10000,
.calib_delay_ms = 600000,
};
static struct pm8xxx_misc_platform_data pm8xxx_misc_pdata = {
.priority = 0,
};
/*
* 0x254=0xC8 (Threshold=110, preamp bias=01)
* 0x255=0xC1 (Hold=110, max attn=0000, mute=1)
* 0x256=0xB0 (decay=101, attack=10, delay=0)
*/
static struct pm8xxx_spk_platform_data pm8xxx_spk_pdata = {
.spk_add_enable = false,
.cd_ng_threshold = 0x0,
.cd_nf_preamp_bias = 0x1,
.cd_ng_hold = 0x6,
.cd_ng_max_atten = 0x0,
.noise_mute = 1,
.cd_ng_decay_rate = 0x5,
.cd_ng_attack_rate = 0x2,
.cd_delay = 0x0,
};
static struct pm8921_bms_platform_data pm8921_bms_pdata __devinitdata = {
.battery_type = BATT_UNKNOWN,
.r_sense_uohm = 10000,
.v_cutoff = 3400,
.max_voltage_uv = MAX_VOLTAGE_MV * 1000,
.shutdown_soc_valid_limit = 20,
.adjust_soc_low_threshold = 25,
.chg_term_ua = CHG_TERM_MA * 1000,
.rconn_mohm = 18,
.normal_voltage_calc_ms = 20000,
.low_voltage_calc_ms = 1000,
.alarm_low_mv = 3400,
.alarm_high_mv = 4000,
.high_ocv_correction_limit_uv = 50,
.low_ocv_correction_limit_uv = 100,
.hold_soc_est = 3,
.enable_fcc_learning = 1,
.min_fcc_learning_soc = 20,
.min_fcc_ocv_pc = 30,
.min_fcc_learning_samples = 5,
};
static struct pm8038_platform_data pm8038_platform_data __devinitdata = {
.irq_pdata = &pm8xxx_irq_pdata,
.gpio_pdata = &pm8xxx_gpio_pdata,
.mpp_pdata = &pm8xxx_mpp_pdata,
.rtc_pdata = &pm8xxx_rtc_pdata,
.pwrkey_pdata = &pm8xxx_pwrkey_pdata,
.misc_pdata = &pm8xxx_misc_pdata,
.regulator_pdatas = msm8930_pm8038_regulator_pdata,
.charger_pdata = &pm8921_chg_pdata,
.bms_pdata = &pm8921_bms_pdata,
.adc_pdata = &pm8038_adc_pdata,
.leds_pdata = &pm8xxx_leds_pdata,
.ccadc_pdata = &pm8xxx_ccadc_pdata,
.spk_pdata = &pm8xxx_spk_pdata,
};
static struct msm_ssbi_platform_data msm8930_ssbi_pm8038_pdata __devinitdata = {
.controller_type = MSM_SBI_CTRL_PMIC_ARBITER,
.slave = {
.name = "pm8038-core",
.platform_data = &pm8038_platform_data,
},
};
/* PM8917 platform data */
static struct pm8xxx_adc_amux pm8917_adc_channels_data[] = {
{"vcoin", CHANNEL_VCOIN, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"vbat", CHANNEL_VBAT, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"dcin", CHANNEL_DCIN, CHAN_PATH_SCALING4, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"ichg", CHANNEL_ICHG, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"vph_pwr", CHANNEL_VPH_PWR, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"ibat", CHANNEL_IBAT, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"batt_therm", CHANNEL_BATT_THERM, CHAN_PATH_SCALING1, AMUX_RSV2,
ADC_DECIMATION_TYPE2, ADC_SCALE_BATT_THERM},
{"batt_id", CHANNEL_BATT_ID, CHAN_PATH_SCALING1, AMUX_RSV2,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"usbin", CHANNEL_USBIN, CHAN_PATH_SCALING3, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"pmic_therm", CHANNEL_DIE_TEMP, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PMIC_THERM},
{"625mv", CHANNEL_625MV, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"125v", CHANNEL_125V, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"chg_temp", CHANNEL_CHG_TEMP, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"xo_therm", CHANNEL_MUXOFF, CHAN_PATH_SCALING1, AMUX_RSV0,
ADC_DECIMATION_TYPE2, ADC_SCALE_XOTHERM},
{"pa_therm0", ADC_MPP_1_AMUX3, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PA_THERM},
{"pa_therm1", ADC_MPP_1_AMUX8, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PA_THERM},
};
static struct pm8xxx_adc_properties pm8917_adc_data = {
.adc_vdd_reference = 1800, /* milli-voltage for this adc */
.bitresolution = 15,
.bipolar = 0,
};
static struct pm8xxx_adc_platform_data pm8917_adc_pdata = {
.adc_channel = pm8917_adc_channels_data,
.adc_num_board_channel = ARRAY_SIZE(pm8917_adc_channels_data),
.adc_prop = &pm8917_adc_data,
.adc_mpp_base = PM8917_MPP_PM_TO_SYS(1),
};
static struct pm8921_platform_data pm8917_platform_data __devinitdata = {
.irq_pdata = &pm8xxx_irq_pdata,
.gpio_pdata = &pm8xxx_gpio_pdata,
.mpp_pdata = &pm8xxx_mpp_pdata,
.rtc_pdata = &pm8xxx_rtc_pdata,
.pwrkey_pdata = &pm8xxx_pwrkey_pdata,
.misc_pdata = &pm8xxx_misc_pdata,
.regulator_pdatas = msm8930_pm8917_regulator_pdata,
.charger_pdata = &pm8921_chg_pdata,
.bms_pdata = &pm8921_bms_pdata,
.adc_pdata = &pm8917_adc_pdata,
.ccadc_pdata = &pm8xxx_ccadc_pdata,
};
static struct msm_ssbi_platform_data msm8930_ssbi_pm8917_pdata __devinitdata = {
.controller_type = MSM_SBI_CTRL_PMIC_ARBITER,
.slave = {
.name = "pm8921-core",
.platform_data = &pm8917_platform_data,
},
};
void __init msm8930_init_pmic(void)
{
if (socinfo_get_pmic_model() != PMIC_MODEL_PM8917) {
/* PM8038 configuration */
pmic_reset_irq = PM8038_IRQ_BASE + PM8038_RESOUT_IRQ;
msm8960_device_ssbi_pmic.dev.platform_data =
&msm8930_ssbi_pm8038_pdata;
pm8038_platform_data.num_regulators
= msm8930_pm8038_regulator_pdata_len;
if (machine_is_msm8930_mtp())
pm8921_bms_pdata.battery_type = BATT_PALLADIUM;
else if (machine_is_msm8930_cdp())
pm8921_chg_pdata.has_dc_supply = true;
} else {
/* PM8917 configuration */
pmic_reset_irq = PM8917_IRQ_BASE + PM8921_RESOUT_IRQ;
msm8960_device_ssbi_pmic.dev.platform_data =
&msm8930_ssbi_pm8917_pdata;
pm8917_platform_data.num_regulators
= msm8930_pm8917_regulator_pdata_len;
if (machine_is_msm8930_mtp())
pm8921_bms_pdata.battery_type = BATT_PALLADIUM;
else if (machine_is_msm8930_cdp())
pm8921_chg_pdata.has_dc_supply = true;
}
if (!machine_is_msm8930_mtp())
pm8921_chg_pdata.battery_less_hardware = 1;
}
| gpl-2.0 |
fortunave3gxx/android_kernel_samsung_fortuna-common | drivers/staging/media/lirc/lirc_zilog.c | 1865 | 41712 | /*
* i2c IR lirc driver for devices with zilog IR processors
*
* Copyright (c) 2000 Gerd Knorr <kraxel@goldbach.in-berlin.de>
* modified for PixelView (BT878P+W/FM) by
* Michal Kochanowicz <mkochano@pld.org.pl>
* Christoph Bartelmus <lirc@bartelmus.de>
* modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by
* Ulrich Mueller <ulrich.mueller42@web.de>
* modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by
* Stefan Jahn <stefan@lkcc.org>
* modified for inclusion into kernel sources by
* Jerome Brock <jbrock@users.sourceforge.net>
* modified for Leadtek Winfast PVR2000 by
* Thomas Reitmayr (treitmayr@yahoo.com)
* modified for Hauppauge PVR-150 IR TX device by
* Mark Weaver <mark@npsl.co.uk>
* changed name from lirc_pvr150 to lirc_zilog, works on more than pvr-150
* Jarod Wilson <jarod@redhat.com>
*
* parts are cut&pasted from the lirc_i2c.c driver
*
* Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
* Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <media/lirc_dev.h>
#include <media/lirc.h>
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
struct IR;
struct IR_rx {
struct kref ref;
struct IR *ir;
/* RX device */
struct mutex client_lock;
struct i2c_client *c;
/* RX polling thread data */
struct task_struct *task;
/* RX read data */
unsigned char b[3];
bool hdpvr_data_fmt;
};
struct IR_tx {
struct kref ref;
struct IR *ir;
/* TX device */
struct mutex client_lock;
struct i2c_client *c;
/* TX additional actions needed */
int need_boot;
bool post_tx_ready_poll;
};
struct IR {
struct kref ref;
struct list_head list;
/* FIXME spinlock access to l.features */
struct lirc_driver l;
struct lirc_buffer rbuf;
struct mutex ir_lock;
atomic_t open_count;
struct i2c_adapter *adapter;
spinlock_t rx_ref_lock; /* struct IR_rx kref get()/put() */
struct IR_rx *rx;
spinlock_t tx_ref_lock; /* struct IR_tx kref get()/put() */
struct IR_tx *tx;
};
/* IR transceiver instance object list */
/*
* This lock is used for the following:
* a. ir_devices_list access, insertions, deletions
* b. struct IR kref get()s and put()s
* c. serialization of ir_probe() for the two i2c_clients for a Z8
*/
static DEFINE_MUTEX(ir_devices_lock);
static LIST_HEAD(ir_devices_list);
/* Block size for IR transmitter */
#define TX_BLOCK_SIZE 99
/* Hauppauge IR transmitter data */
struct tx_data_struct {
/* Boot block */
unsigned char *boot_data;
/* Start of binary data block */
unsigned char *datap;
/* End of binary data block */
unsigned char *endp;
/* Number of installed codesets */
unsigned int num_code_sets;
/* Pointers to codesets */
unsigned char **code_sets;
/* Global fixed data template */
int fixed[TX_BLOCK_SIZE];
};
static struct tx_data_struct *tx_data;
static struct mutex tx_data_lock;
#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
## args)
#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
static bool debug; /* debug output */
static bool tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
#define dprintk(fmt, args...) \
do { \
if (debug) \
printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
## args); \
} while (0)
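/*
 * Example: dprintk("poll thread started\n") compiles to a KERN_DEBUG
 * printk that is emitted only when the 'debug' module parameter is set.
 */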
/* struct IR reference counting */
static struct IR *get_ir_device(struct IR *ir, bool ir_devices_lock_held)
{
if (ir_devices_lock_held) {
kref_get(&ir->ref);
} else {
mutex_lock(&ir_devices_lock);
kref_get(&ir->ref);
mutex_unlock(&ir_devices_lock);
}
return ir;
}
static void release_ir_device(struct kref *ref)
{
struct IR *ir = container_of(ref, struct IR, ref);
/*
* Things should be in this state by now:
* ir->rx set to NULL and deallocated - happens before ir->rx->ir put()
* ir->rx->task kthread stopped - happens before ir->rx->ir put()
* ir->tx set to NULL and deallocated - happens before ir->tx->ir put()
* ir->open_count == 0 - happens on final close()
* ir_lock, tx_ref_lock, rx_ref_lock, all released
*/
if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
lirc_unregister_driver(ir->l.minor);
ir->l.minor = MAX_IRCTL_DEVICES;
}
if (ir->rbuf.fifo_initialized)
lirc_buffer_free(&ir->rbuf);
list_del(&ir->list);
kfree(ir);
}
static int put_ir_device(struct IR *ir, bool ir_devices_lock_held)
{
int released;
if (ir_devices_lock_held)
return kref_put(&ir->ref, release_ir_device);
mutex_lock(&ir_devices_lock);
released = kref_put(&ir->ref, release_ir_device);
mutex_unlock(&ir_devices_lock);
return released;
}
/* struct IR_rx reference counting */
static struct IR_rx *get_ir_rx(struct IR *ir)
{
struct IR_rx *rx;
spin_lock(&ir->rx_ref_lock);
rx = ir->rx;
if (rx != NULL)
kref_get(&rx->ref);
spin_unlock(&ir->rx_ref_lock);
return rx;
}
static void destroy_rx_kthread(struct IR_rx *rx, bool ir_devices_lock_held)
{
/* end up polling thread */
if (!IS_ERR_OR_NULL(rx->task)) {
kthread_stop(rx->task);
rx->task = NULL;
/* Put the ir ptr that ir_probe() gave to the rx poll thread */
put_ir_device(rx->ir, ir_devices_lock_held);
}
}
static void release_ir_rx(struct kref *ref)
{
struct IR_rx *rx = container_of(ref, struct IR_rx, ref);
struct IR *ir = rx->ir;
/*
* This release function can't do all the work, as we want
* to keep the rx_ref_lock a spinlock, and killing the poll thread
* and releasing the ir reference can cause a sleep. That work is
* performed by put_ir_rx()
*/
ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
/* Don't put_ir_device(rx->ir) here; lock can't be freed yet */
ir->rx = NULL;
/* Don't do the kfree(rx) here; we still need to kill the poll thread */
return;
}
static int put_ir_rx(struct IR_rx *rx, bool ir_devices_lock_held)
{
int released;
struct IR *ir = rx->ir;
spin_lock(&ir->rx_ref_lock);
released = kref_put(&rx->ref, release_ir_rx);
spin_unlock(&ir->rx_ref_lock);
/* Destroy the rx kthread while not holding the spinlock */
if (released) {
destroy_rx_kthread(rx, ir_devices_lock_held);
kfree(rx);
/* Make sure we're not still in a poll_table somewhere */
wake_up_interruptible(&ir->rbuf.wait_poll);
}
/* Do a reference put() for the rx->ir reference, if we released rx */
if (released)
put_ir_device(ir, ir_devices_lock_held);
return released;
}
/* struct IR_tx reference counting */
static struct IR_tx *get_ir_tx(struct IR *ir)
{
struct IR_tx *tx;
spin_lock(&ir->tx_ref_lock);
tx = ir->tx;
if (tx != NULL)
kref_get(&tx->ref);
spin_unlock(&ir->tx_ref_lock);
return tx;
}
static void release_ir_tx(struct kref *ref)
{
struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
struct IR *ir = tx->ir;
ir->l.features &= ~LIRC_CAN_SEND_PULSE;
/* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
ir->tx = NULL;
kfree(tx);
}
static int put_ir_tx(struct IR_tx *tx, bool ir_devices_lock_held)
{
int released;
struct IR *ir = tx->ir;
spin_lock(&ir->tx_ref_lock);
released = kref_put(&tx->ref, release_ir_tx);
spin_unlock(&ir->tx_ref_lock);
/* Do a reference put() for the tx->ir reference, if we released tx */
if (released)
put_ir_device(ir, ir_devices_lock_held);
return released;
}
static int add_to_buf(struct IR *ir)
{
__u16 code;
unsigned char codes[2];
unsigned char keybuf[6];
int got_data = 0;
int ret;
int failures = 0;
unsigned char sendbuf[1] = { 0 };
struct lirc_buffer *rbuf = ir->l.rbuf;
struct IR_rx *rx;
struct IR_tx *tx;
if (lirc_buffer_full(rbuf)) {
dprintk("buffer overflow\n");
return -EOVERFLOW;
}
rx = get_ir_rx(ir);
if (rx == NULL)
return -ENXIO;
/* Ensure our rx->c i2c_client remains valid for the duration */
mutex_lock(&rx->client_lock);
if (rx->c == NULL) {
mutex_unlock(&rx->client_lock);
put_ir_rx(rx, false);
return -ENXIO;
}
tx = get_ir_tx(ir);
/*
* service the device as long as it is returning
* data and we have space
*/
do {
if (kthread_should_stop()) {
ret = -ENODATA;
break;
}
/*
* Lock i2c bus for the duration. RX/TX chips interfere so
* this is worth it
*/
mutex_lock(&ir->ir_lock);
if (kthread_should_stop()) {
mutex_unlock(&ir->ir_lock);
ret = -ENODATA;
break;
}
/*
* Send a random "poll command" (?). The Windows driver does this,
* and it is a good point at which to detect chip failure.
*/
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
zilog_error("unable to read from the IR chip "
"after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
zilog_error("polling the IR receiver chip failed, "
"trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
if (kthread_should_stop()) {
mutex_unlock(&ir->ir_lock);
ret = -ENODATA;
break;
}
schedule_timeout((100 * HZ + 999) / 1000);
if (tx != NULL)
tx->need_boot = 1;
++failures;
mutex_unlock(&ir->ir_lock);
ret = 0;
continue;
}
if (kthread_should_stop()) {
mutex_unlock(&ir->ir_lock);
ret = -ENODATA;
break;
}
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
zilog_error("i2c_master_recv failed with %d -- "
"keeping last read buffer\n", ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
}
/* key pressed ? */
if (rx->hdpvr_data_fmt) {
if (got_data && (keybuf[0] == 0x80)) {
ret = 0;
break;
} else if (got_data && (keybuf[0] == 0x00)) {
ret = -ENODATA;
break;
}
} else if ((rx->b[0] & 0x80) == 0) {
ret = got_data ? 0 : -ENODATA;
break;
}
/* look what we have */
code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
codes[0] = (code >> 8) & 0xff;
codes[1] = code & 0xff;
/* return it */
lirc_buffer_write(rbuf, codes);
++got_data;
ret = 0;
} while (!lirc_buffer_full(rbuf));
mutex_unlock(&rx->client_lock);
if (tx != NULL)
put_ir_tx(tx, false);
put_ir_rx(rx, false);
return ret;
}
/*
* Main function of the polling thread -- from lirc_dev.
* We don't fit the LIRC model at all anymore. This is horrible, but
* basically we have a single RX/TX device with a nasty failure mode
* that needs to be accounted for across the pair. lirc lets us provide
* fops, but prevents us from using the internal polling, etc. if we do
* so. Hence the replication. Might be neater to extend the LIRC model
* to account for this but I'd think it's a very special case of seriously
* messed up hardware.
*/
static int lirc_thread(void *arg)
{
struct IR *ir = arg;
struct lirc_buffer *rbuf = ir->l.rbuf;
dprintk("poll thread started\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
/* if device not opened, we can sleep half a second */
if (atomic_read(&ir->open_count) == 0) {
schedule_timeout(HZ/2);
continue;
}
/*
* This is ~113*2 + 24 + jitter (2*repeat gap + code length).
* We use this interval as the chip resets every time you poll
* it (bad!). This is therefore just sufficient to catch all
* of the button presses. It makes the remote much more
* responsive. You can see the difference by running irw and
* holding down a button. With 100ms, the old polling
* interval, you'll notice breaks in the repeat sequence
* corresponding to lost keypresses.
*/
schedule_timeout((260 * HZ) / 1000);
if (kthread_should_stop())
break;
if (!add_to_buf(ir))
wake_up_interruptible(&rbuf->wait_poll);
}
dprintk("poll thread ended\n");
return 0;
}
static int set_use_inc(void *data)
{
return 0;
}
static void set_use_dec(void *data)
{
return;
}
/* safe read of a uint32 (always network byte order) */
static int read_uint32(unsigned char **data,
unsigned char *endp, unsigned int *val)
{
if (*data + 4 > endp)
return 0;
*val = ((*data)[0] << 24) | ((*data)[1] << 16) |
((*data)[2] << 8) | (*data)[3];
*data += 4;
return 1;
}
/* safe read of a uint8 */
static int read_uint8(unsigned char **data,
unsigned char *endp, unsigned char *val)
{
if (*data + 1 > endp)
return 0;
*val = *((*data)++);
return 1;
}
/* safe skipping of N bytes */
static int skip(unsigned char **data,
unsigned char *endp, unsigned int distance)
{
if (*data + distance > endp)
return 0;
*data += distance;
return 1;
}
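/*
 * Layout of the haup-ir-blaster.bin code set data, as parsed by fw_load()
 * and get_key_data() (all multi-byte values big endian, see read_uint32()):
 *
 *   u8  version (must be 1)
 *   u8  boot_block[TX_BLOCK_SIZE]
 *   u32 num_code_sets
 *   u8  num_global_fixed, then that many (position, value) byte pairs
 *   per code set:
 *     u32 id, u8 keys, u8 ndiffs, u8 diff_positions[ndiffs]
 *     first key: u8 id, then one byte per non-fixed template position
 *     remaining keys-1 entries: u8 id, u8 diff_values[ndiffs]
 */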
/* decompress key data into the given buffer */
static int get_key_data(unsigned char *buf,
unsigned int codeset, unsigned int key)
{
unsigned char *data, *endp, *diffs, *key_block;
unsigned char keys, ndiffs, id;
unsigned int base, lim, pos, i;
/* Binary search for the codeset */
for (base = 0, lim = tx_data->num_code_sets; lim; lim >>= 1) {
pos = base + (lim >> 1);
data = tx_data->code_sets[pos];
if (!read_uint32(&data, tx_data->endp, &i))
goto corrupt;
if (i == codeset)
break;
else if (codeset > i) {
base = pos + 1;
--lim;
}
}
/* Not found? */
if (!lim)
return -EPROTO;
/* Set end of data block */
endp = pos < tx_data->num_code_sets - 1 ?
tx_data->code_sets[pos + 1] : tx_data->endp;
/* Read the block header */
if (!read_uint8(&data, endp, &keys) ||
!read_uint8(&data, endp, &ndiffs) ||
ndiffs > TX_BLOCK_SIZE || keys == 0)
goto corrupt;
/* Save diffs & skip */
diffs = data;
if (!skip(&data, endp, ndiffs))
goto corrupt;
/* Read the id of the first key */
if (!read_uint8(&data, endp, &id))
goto corrupt;
/* Unpack the first key's data */
for (i = 0; i < TX_BLOCK_SIZE; ++i) {
if (tx_data->fixed[i] == -1) {
if (!read_uint8(&data, endp, &buf[i]))
goto corrupt;
} else {
buf[i] = (unsigned char)tx_data->fixed[i];
}
}
/* Early out key found/not found */
if (key == id)
return 0;
if (keys == 1)
return -EPROTO;
/* Sanity check */
key_block = data;
if (!skip(&data, endp, (keys - 1) * (ndiffs + 1)))
goto corrupt;
/* Binary search for the key */
for (base = 0, lim = keys - 1; lim; lim >>= 1) {
/* Seek to block */
unsigned char *key_data;
pos = base + (lim >> 1);
key_data = key_block + (ndiffs + 1) * pos;
if (*key_data == key) {
/* skip key id */
++key_data;
/* found, so unpack the diffs */
for (i = 0; i < ndiffs; ++i) {
unsigned char val;
if (!read_uint8(&key_data, endp, &val) ||
diffs[i] >= TX_BLOCK_SIZE)
goto corrupt;
buf[diffs[i]] = val;
}
return 0;
} else if (key > *key_data) {
base = pos + 1;
--lim;
}
}
/* Key not found */
return -EPROTO;
corrupt:
zilog_error("firmware is corrupt\n");
return -EFAULT;
}
/* send a block of data to the IR TX device */
static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
{
int i, j, ret;
unsigned char buf[5];
for (i = 0; i < TX_BLOCK_SIZE;) {
int tosend = TX_BLOCK_SIZE - i;
if (tosend > 4)
tosend = 4;
buf[0] = (unsigned char)(i + 1);
for (j = 0; j < tosend; ++j)
buf[1 + j] = data_block[i + j];
dprintk("%*ph", 5, buf);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
i += tosend;
}
return 0;
}
/* send boot data to the IR TX device */
static int send_boot_data(struct IR_tx *tx)
{
int ret, i;
unsigned char buf[4];
/* send the boot block */
ret = send_data_block(tx, tx_data->boot_data);
if (ret != 0)
return ret;
/* Hit the go button to activate the new boot data */
buf[0] = 0x00;
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/*
* Wait for zilog to settle after hitting go post boot block upload.
* Without this delay, the HD-PVR and HVR-1950 both return an -EIO
* upon attempting to get firmware revision, and tx probe thus fails.
*/
for (i = 0; i < 10; i++) {
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
udelay(100);
}
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return 0;
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
return 0;
}
zilog_notify("Zilog/Hauppauge IR blaster firmware version "
"%d.%d.%d loaded\n", buf[1], buf[2], buf[3]);
return 0;
}
/* unload "firmware", lock held */
static void fw_unload_locked(void)
{
if (tx_data) {
if (tx_data->code_sets)
vfree(tx_data->code_sets);
if (tx_data->datap)
vfree(tx_data->datap);
vfree(tx_data);
tx_data = NULL;
dprintk("successfully unloaded IR blaster firmware\n");
}
}
/* unload "firmware" for the IR TX device */
static void fw_unload(void)
{
mutex_lock(&tx_data_lock);
fw_unload_locked();
mutex_unlock(&tx_data_lock);
}
/* load "firmware" for the IR TX device */
static int fw_load(struct IR_tx *tx)
{
int ret;
unsigned int i;
unsigned char *data, version, num_global_fixed;
const struct firmware *fw_entry;
/* Already loaded? */
mutex_lock(&tx_data_lock);
if (tx_data) {
ret = 0;
goto out;
}
/* Request codeset data file */
ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
if (ret != 0) {
zilog_error("firmware haup-ir-blaster.bin not available "
"(%d)\n", ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
dprintk("firmware of size %zu loaded\n", fw_entry->size);
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
if (tx_data == NULL) {
zilog_error("out of memory\n");
release_firmware(fw_entry);
ret = -ENOMEM;
goto out;
}
tx_data->code_sets = NULL;
/* Copy the data so hotplug doesn't get confused and time out */
tx_data->datap = vmalloc(fw_entry->size);
if (tx_data->datap == NULL) {
zilog_error("out of memory\n");
release_firmware(fw_entry);
vfree(tx_data);
ret = -ENOMEM;
goto out;
}
memcpy(tx_data->datap, fw_entry->data, fw_entry->size);
tx_data->endp = tx_data->datap + fw_entry->size;
release_firmware(fw_entry); fw_entry = NULL;
/* Check version */
data = tx_data->datap;
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
zilog_error("unsupported code set file version (%u, expected"
"1) -- please upgrade to a newer driver",
version);
fw_unload_locked();
ret = -EFAULT;
goto out;
}
/* Save boot block for later */
tx_data->boot_data = data;
if (!skip(&data, tx_data->endp, TX_BLOCK_SIZE))
goto corrupt;
if (!read_uint32(&data, tx_data->endp,
&tx_data->num_code_sets))
goto corrupt;
dprintk("%u IR blaster codesets loaded\n", tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
tx_data->num_code_sets * sizeof(char *));
if (tx_data->code_sets == NULL) {
fw_unload_locked();
ret = -ENOMEM;
goto out;
}
for (i = 0; i < TX_BLOCK_SIZE; ++i)
tx_data->fixed[i] = -1;
/* Read global fixed data template */
if (!read_uint8(&data, tx_data->endp, &num_global_fixed) ||
num_global_fixed > TX_BLOCK_SIZE)
goto corrupt;
for (i = 0; i < num_global_fixed; ++i) {
unsigned char pos, val;
if (!read_uint8(&data, tx_data->endp, &pos) ||
!read_uint8(&data, tx_data->endp, &val) ||
pos >= TX_BLOCK_SIZE)
goto corrupt;
tx_data->fixed[pos] = (int)val;
}
/* Filch out the position of each code set */
for (i = 0; i < tx_data->num_code_sets; ++i) {
unsigned int id;
unsigned char keys;
unsigned char ndiffs;
/* Save the codeset position */
tx_data->code_sets[i] = data;
/* Read header */
if (!read_uint32(&data, tx_data->endp, &id) ||
!read_uint8(&data, tx_data->endp, &keys) ||
!read_uint8(&data, tx_data->endp, &ndiffs) ||
ndiffs > TX_BLOCK_SIZE || keys == 0)
goto corrupt;
/* skip diff positions */
if (!skip(&data, tx_data->endp, ndiffs))
goto corrupt;
/*
* After the diffs we have the first key id + data -
* global fixed
*/
if (!skip(&data, tx_data->endp,
1 + TX_BLOCK_SIZE - num_global_fixed))
goto corrupt;
/* Then we have keys-1 blocks of key id+diffs */
if (!skip(&data, tx_data->endp,
(ndiffs + 1) * (keys - 1)))
goto corrupt;
}
ret = 0;
goto out;
corrupt:
zilog_error("firmware is corrupt\n");
fw_unload_locked();
ret = -EFAULT;
out:
mutex_unlock(&tx_data_lock);
return ret;
}
/* copied from lirc_dev */
static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
{
struct IR *ir = filep->private_data;
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l.rbuf;
int ret = 0, written = 0, retries = 0;
unsigned int m;
DECLARE_WAITQUEUE(wait, current);
dprintk("read called\n");
if (n % rbuf->chunk_size) {
dprintk("read result = -EINVAL\n");
return -EINVAL;
}
rx = get_ir_rx(ir);
if (rx == NULL)
return -ENXIO;
/*
* we add ourselves to the task queue before the buffer check
* to avoid losing a scan code (in case the queue is awakened somewhere
* between the while-condition check and scheduling)
*/
add_wait_queue(&rbuf->wait_poll, &wait);
set_current_state(TASK_INTERRUPTIBLE);
/*
* As long as we haven't provided 'length' bytes, the device is open in
* blocking mode, and 'copy_to_user' is happy, wait for data.
*/
while (written < n && ret == 0) {
if (lirc_buffer_empty(rbuf)) {
/*
* According to the read(2) man page, 'written' can be
* returned as less than 'n', instead of blocking
* again, returning -EWOULDBLOCK, or returning
* -ERESTARTSYS
*/
if (written)
break;
if (filep->f_flags & O_NONBLOCK) {
ret = -EWOULDBLOCK;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
schedule();
set_current_state(TASK_INTERRUPTIBLE);
} else {
unsigned char buf[MAX_XFER_SIZE];
if (rbuf->chunk_size > sizeof(buf)) {
zilog_error("chunk_size is too big (%d)!\n",
rbuf->chunk_size);
ret = -EINVAL;
break;
}
m = lirc_buffer_read(rbuf, buf);
if (m == rbuf->chunk_size) {
ret = copy_to_user((void *)outbuf+written, buf,
rbuf->chunk_size);
written += rbuf->chunk_size;
} else {
retries++;
}
if (retries >= 5) {
zilog_error("Buffer read failed!\n");
ret = -EIO;
}
}
}
remove_wait_queue(&rbuf->wait_poll, &wait);
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
dprintk("read result = %d (%s)\n", ret, ret ? "Error" : "OK");
return ret ? ret : written;
}
/* send a keypress to the IR TX device */
static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
{
unsigned char data_block[TX_BLOCK_SIZE];
unsigned char buf[2];
int i, ret;
/* Get data for the codeset/key */
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
zilog_error("failed to get data for code %u, key %u -- check "
"lircd.conf entries\n", code, key);
return ret;
} else if (ret != 0)
return ret;
/* Send the data block */
ret = send_data_block(tx, data_block);
if (ret != 0)
return ret;
/* Send data block length? */
buf[0] = 0x00;
buf[1] = 0x40;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Give the z8 a moment to process data block */
for (i = 0; i < 10; i++) {
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
udelay(100);
}
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
if (buf[0] != 0xA0) {
zilog_error("unexpected IR TX response #1: %02x\n",
buf[0]);
return -EFAULT;
}
/* Send prepare command? */
buf[0] = 0x00;
buf[1] = 0x80;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/*
* The sleep bits aren't necessary on the HD PVR, and in fact, the
* last i2c_master_recv always fails with a -5, so for now, we're
* going to skip this whole mess and say we're done on the HD PVR
*/
if (!tx->post_tx_ready_poll) {
dprintk("sent code %u, key %u\n", code, key);
return 0;
}
/*
* This bit NAKs until the device is ready, so we retry it
* sleeping a bit each time. This seems to be what the windows
* driver does, approximately.
* Try for up to 1s.
*/
for (i = 0; i < 20; ++i) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((50 * HZ + 999) / 1000);
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
dprintk("NAK expected: i2c_master_send "
"failed with %d (try %d)\n", ret, i+1);
}
if (ret != 1) {
zilog_error("IR TX chip never got ready: last i2c_master_send "
"failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Seems to be an 'ok' response */
i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
zilog_error("i2c_master_recv failed with %d\n", ret);
return -EFAULT;
}
if (buf[0] != 0x80) {
zilog_error("unexpected IR TX response #2: %02x\n", buf[0]);
return -EFAULT;
}
/* Oh good, it worked */
dprintk("sent code %u, key %u\n", code, key);
return 0;
}
/*
* Write a code to the device. We take in a 32-bit number (an int) and then
* decode this to a codeset/key index. The key data is then decompressed and
* sent to the device. We have a spin lock as per i2c documentation to prevent
* multiple concurrent sends which would probably cause the device to explode.
*/
static ssize_t write(struct file *filep, const char *buf, size_t n,
loff_t *ppos)
{
struct IR *ir = filep->private_data;
struct IR_tx *tx;
size_t i;
int failures = 0;
/* Validate user parameters */
if (n % sizeof(int))
return -EINVAL;
/* Get a struct IR_tx reference */
tx = get_ir_tx(ir);
if (tx == NULL)
return -ENXIO;
/* Ensure our tx->c i2c_client remains valid for the duration */
mutex_lock(&tx->client_lock);
if (tx->c == NULL) {
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return -ENXIO;
}
/* Lock i2c bus for the duration */
mutex_lock(&ir->ir_lock);
/* Send each keypress */
for (i = 0; i < n;) {
int ret = 0;
int command;
if (copy_from_user(&command, buf + i, sizeof(command))) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return -EFAULT;
}
/* Send boot data first if required */
if (tx->need_boot == 1) {
/* Make sure we have the 'firmware' loaded, first */
ret = fw_load(tx);
if (ret != 0) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
if (ret != -ENOMEM)
ret = -EIO;
return ret;
}
/* Prep the chip for transmitting codes */
ret = send_boot_data(tx);
if (ret == 0)
tx->need_boot = 0;
}
/* Send the code */
if (ret == 0) {
ret = send_code(tx, (unsigned)command >> 16,
(unsigned)command & 0xFFFF);
if (ret == -EPROTO) {
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return ret;
}
}
/*
* Hmm, a failure. If we've had a few then give up, otherwise
* try a reset
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
zilog_error("sending to the IR transmitter chip "
"failed, trying reset\n");
if (failures >= 3) {
zilog_error("unable to send to the IR chip "
"after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return ret;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout((100 * HZ + 999) / 1000);
tx->need_boot = 1;
++failures;
} else
i += sizeof(int);
}
/* Release i2c bus */
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
/* Give back our struct IR_tx reference */
put_ir_tx(tx, false);
/* All looks good */
return n;
}
/* copied from lirc_dev */
static unsigned int poll(struct file *filep, poll_table *wait)
{
struct IR *ir = filep->private_data;
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l.rbuf;
unsigned int ret;
dprintk("poll called\n");
rx = get_ir_rx(ir);
if (rx == NULL) {
/*
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
dprintk("poll result = POLLERR\n");
return POLLERR;
}
/*
* Add our lirc_buffer's wait_queue to the poll_table. A wake up on
* that buffer's wait queue indicates we may have a new poll status.
*/
poll_wait(filep, &rbuf->wait_poll, wait);
/* Indicate what ops could happen immediately without blocking */
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);
dprintk("poll result = %s\n", ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct IR *ir = filep->private_data;
int result;
unsigned long mode, features;
features = ir->l.features;
switch (cmd) {
case LIRC_GET_LENGTH:
result = put_user((unsigned long)13,
(unsigned long *)arg);
break;
case LIRC_GET_FEATURES:
result = put_user(features, (unsigned long *) arg);
break;
case LIRC_GET_REC_MODE:
if (!(features&LIRC_CAN_REC_MASK))
return -ENOSYS;
result = put_user(LIRC_REC2MODE
(features&LIRC_CAN_REC_MASK),
(unsigned long *)arg);
break;
case LIRC_SET_REC_MODE:
if (!(features&LIRC_CAN_REC_MASK))
return -ENOSYS;
result = get_user(mode, (unsigned long *)arg);
if (!result && !(LIRC_MODE2REC(mode) & features))
result = -EINVAL;
break;
case LIRC_GET_SEND_MODE:
if (!(features&LIRC_CAN_SEND_MASK))
return -ENOSYS;
result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
break;
case LIRC_SET_SEND_MODE:
if (!(features&LIRC_CAN_SEND_MASK))
return -ENOSYS;
result = get_user(mode, (unsigned long *) arg);
if (!result && mode != LIRC_MODE_PULSE)
return -EINVAL;
break;
default:
return -EINVAL;
}
return result;
}
static struct IR *get_ir_device_by_minor(unsigned int minor)
{
struct IR *ir;
struct IR *ret = NULL;
mutex_lock(&ir_devices_lock);
if (!list_empty(&ir_devices_list)) {
list_for_each_entry(ir, &ir_devices_list, list) {
if (ir->l.minor == minor) {
ret = get_ir_device(ir, true);
break;
}
}
}
mutex_unlock(&ir_devices_lock);
return ret;
}
/*
* Open the IR device. Get hold of our IR structure and
* stash it in private_data for the file
*/
static int open(struct inode *node, struct file *filep)
{
struct IR *ir;
unsigned int minor = MINOR(node->i_rdev);
/* find our IR struct */
ir = get_ir_device_by_minor(minor);
if (ir == NULL)
return -ENODEV;
atomic_inc(&ir->open_count);
/* stash our IR struct */
filep->private_data = ir;
nonseekable_open(node, filep);
return 0;
}
/* Close the IR device */
static int close(struct inode *node, struct file *filep)
{
/* find our IR struct */
struct IR *ir = filep->private_data;
if (ir == NULL) {
zilog_error("close: no private_data attached to the file!\n");
return -ENODEV;
}
atomic_dec(&ir->open_count);
put_ir_device(ir, false);
return 0;
}
static int ir_remove(struct i2c_client *client);
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
#define ID_FLAG_TX 0x01
#define ID_FLAG_HDPVR 0x02
static const struct i2c_device_id ir_transceiver_id[] = {
{ "ir_tx_z8f0811_haup", ID_FLAG_TX },
{ "ir_rx_z8f0811_haup", 0 },
{ "ir_tx_z8f0811_hdpvr", ID_FLAG_HDPVR | ID_FLAG_TX },
{ "ir_rx_z8f0811_hdpvr", ID_FLAG_HDPVR },
{ }
};
static struct i2c_driver driver = {
.driver = {
.owner = THIS_MODULE,
.name = "Zilog/Hauppauge i2c IR",
},
.probe = ir_probe,
.remove = ir_remove,
.id_table = ir_transceiver_id,
};
static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = read,
.write = write,
.poll = poll,
.unlocked_ioctl = ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ioctl,
#endif
.open = open,
.release = close
};
static struct lirc_driver lirc_template = {
.name = "lirc_zilog",
.minor = -1,
.code_length = 13,
.buffer_size = BUFLEN / 2,
.sample_rate = 0, /* tell lirc_dev to not start its own kthread */
.chunk_size = 2,
.set_use_inc = set_use_inc,
.set_use_dec = set_use_dec,
.fops = &lirc_fops,
.owner = THIS_MODULE,
};
static int ir_remove(struct i2c_client *client)
{
if (strncmp("ir_tx_z8", client->name, 8) == 0) {
struct IR_tx *tx = i2c_get_clientdata(client);
if (tx != NULL) {
mutex_lock(&tx->client_lock);
tx->c = NULL;
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
}
} else if (strncmp("ir_rx_z8", client->name, 8) == 0) {
struct IR_rx *rx = i2c_get_clientdata(client);
if (rx != NULL) {
mutex_lock(&rx->client_lock);
rx->c = NULL;
mutex_unlock(&rx->client_lock);
put_ir_rx(rx, false);
}
}
return 0;
}
/* ir_devices_lock must be held */
static struct IR *get_ir_device_by_adapter(struct i2c_adapter *adapter)
{
struct IR *ir;
if (list_empty(&ir_devices_list))
return NULL;
list_for_each_entry(ir, &ir_devices_list, list)
if (ir->adapter == adapter) {
get_ir_device(ir, true);
return ir;
}
return NULL;
}
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct IR *ir;
struct IR_tx *tx;
struct IR_rx *rx;
struct i2c_adapter *adap = client->adapter;
int ret;
bool tx_probe = false;
dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
__func__, id->name, adap->nr, adap->name, client->addr);
/*
* The IR receiver is at i2c address 0x71.
* The IR transmitter is at i2c address 0x70.
*/
if (id->driver_data & ID_FLAG_TX)
tx_probe = true;
else if (tx_only) /* module option */
return -ENXIO;
zilog_info("probing IR %s on %s (i2c-%d)\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_lock(&ir_devices_lock);
/* Use a single struct IR instance for both the Rx and Tx functions */
ir = get_ir_device_by_adapter(adap);
if (ir == NULL) {
ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
if (ir == NULL) {
ret = -ENOMEM;
goto out_no_ir;
}
kref_init(&ir->ref);
/* store for use in ir_probe() again, and open() later on */
INIT_LIST_HEAD(&ir->list);
list_add_tail(&ir->list, &ir_devices_list);
ir->adapter = adap;
mutex_init(&ir->ir_lock);
atomic_set(&ir->open_count, 0);
spin_lock_init(&ir->tx_ref_lock);
spin_lock_init(&ir->rx_ref_lock);
/* set lirc_dev stuff */
memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
/*
* FIXME this is a pointer reference to us, but no refcount.
*
* This is OK for now, since lirc_dev currently won't touch this
* buffer as we provide our own lirc_fops.
*
* Currently our own lirc_fops rely on this ir->l.rbuf pointer
*/
ir->l.rbuf = &ir->rbuf;
ir->l.dev = &adap->dev;
ret = lirc_buffer_init(ir->l.rbuf,
ir->l.chunk_size, ir->l.buffer_size);
if (ret)
goto out_put_ir;
}
if (tx_probe) {
/* Get the IR_rx instance for later, if already allocated */
rx = get_ir_rx(ir);
/* Set up a struct IR_tx instance */
tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
if (tx == NULL) {
ret = -ENOMEM;
goto out_put_xx;
}
kref_init(&tx->ref);
ir->tx = tx;
ir->l.features |= LIRC_CAN_SEND_PULSE;
mutex_init(&tx->client_lock);
tx->c = client;
tx->need_boot = 1;
tx->post_tx_ready_poll =
(id->driver_data & ID_FLAG_HDPVR) ? false : true;
/* An ir ref goes to the struct IR_tx instance */
tx->ir = get_ir_device(ir, true);
/* A tx ref goes to the i2c_client */
i2c_set_clientdata(client, get_ir_tx(ir));
/*
* Load the 'firmware'. We do this before registering with
* lirc_dev, so the first firmware load attempt does not happen
* after an open() or write() call on the device.
*
* Failure here is not deemed catastrophic, so the receiver will
* still be usable. Firmware load will be retried in write(),
* if it is needed.
*/
fw_load(tx);
/* Proceed only if the Rx client is also ready or not needed */
if (rx == NULL && !tx_only) {
zilog_info("probe of IR Tx on %s (i2c-%d) done. Waiting"
" on IR Rx.\n", adap->name, adap->nr);
goto out_ok;
}
} else {
/* Get the IR_tx instance for later, if already allocated */
tx = get_ir_tx(ir);
/* Set up a struct IR_rx instance */
rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
if (rx == NULL) {
ret = -ENOMEM;
goto out_put_xx;
}
kref_init(&rx->ref);
ir->rx = rx;
ir->l.features |= LIRC_CAN_REC_LIRCCODE;
mutex_init(&rx->client_lock);
rx->c = client;
rx->hdpvr_data_fmt =
(id->driver_data & ID_FLAG_HDPVR) ? true : false;
/* An ir ref goes to the struct IR_rx instance */
rx->ir = get_ir_device(ir, true);
/* An rx ref goes to the i2c_client */
i2c_set_clientdata(client, get_ir_rx(ir));
/*
* Start the polling thread.
* It will only perform an empty loop around schedule_timeout()
* until we register with lirc_dev and the first user open()
*/
/* An ir ref goes to the new rx polling kthread */
rx->task = kthread_run(lirc_thread, get_ir_device(ir, true),
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
zilog_error("%s: could not start IR Rx polling thread"
"\n", __func__);
/* Failed kthread, so put back the ir ref */
put_ir_device(ir, true);
/* Failure exit, so put back rx ref from i2c_client */
i2c_set_clientdata(client, NULL);
put_ir_rx(rx, true);
ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
goto out_put_xx;
}
/* Proceed only if the Tx client is also ready */
if (tx == NULL) {
zilog_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
" on IR Tx.\n", adap->name, adap->nr);
goto out_ok;
}
}
/* register with lirc */
ir->l.minor = minor; /* module option: user requested minor number */
ir->l.minor = lirc_register_driver(&ir->l);
if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
__func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
ret = -EBADRQC;
goto out_put_xx;
}
zilog_info("IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
adap->name, adap->nr, ir->l.minor);
out_ok:
if (rx != NULL)
put_ir_rx(rx, true);
if (tx != NULL)
put_ir_tx(tx, true);
put_ir_device(ir, true);
zilog_info("probe of IR %s on %s (i2c-%d) done\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
return 0;
out_put_xx:
if (rx != NULL)
put_ir_rx(rx, true);
if (tx != NULL)
put_ir_tx(tx, true);
out_put_ir:
put_ir_device(ir, true);
out_no_ir:
zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
__func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
ret);
mutex_unlock(&ir_devices_lock);
return ret;
}
static int __init zilog_init(void)
{
int ret;
zilog_notify("Zilog/Hauppauge IR driver initializing\n");
mutex_init(&tx_data_lock);
request_module("firmware_class");
ret = i2c_add_driver(&driver);
if (ret)
zilog_error("initialization failed\n");
else
zilog_notify("initialization complete\n");
return ret;
}
static void __exit zilog_exit(void)
{
i2c_del_driver(&driver);
/* if loaded */
fw_unload();
zilog_notify("Zilog/Hauppauge IR driver unloaded\n");
}
module_init(zilog_init);
module_exit(zilog_exit);
MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
"Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
"Andy Walls");
MODULE_LICENSE("GPL");
/* for compat with old name, which isn't all that accurate anymore */
MODULE_ALIAS("lirc_pvr150");
module_param(minor, int, 0444);
MODULE_PARM_DESC(minor, "Preferred minor device number");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Enable debugging messages");
module_param(tx_only, bool, 0644);
MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
| gpl-2.0 |
nizovn/linux | drivers/s390/char/tape_class.c | 2377 | 2993 | /*
* Copyright IBM Corp. 2004
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
"Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
static struct class *tape_class;
/*
* Register a tape device and return a pointer to the tape_class_device
* structure.
*
* device
* The pointer to the struct device of the physical (base) device.
* dev
* The intended major/minor number. The major number may be 0 to
* get a dynamic major number.
* fops
* The pointer to the driver's file operations for the tape device.
* device_name
* The name of the character device node to create.
* mode_name
* The name of the sysfs link from the base device to the class device.
*/
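/*
 * Illustrative call (hypothetical names, for documentation only; a real
 * caller would use its own device, major number and fops):
 *
 *	tcd = register_tape_dev(&cdev->dev, MKDEV(tape_major, minor),
 *				&tape_fops, "ntibm0", "non-rewinding");
 *	if (IS_ERR(tcd))
 *		return PTR_ERR(tcd);
 */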
struct tape_class_device *register_tape_dev(
struct device * device,
dev_t dev,
const struct file_operations *fops,
char * device_name,
char * mode_name)
{
struct tape_class_device * tcd;
int rc;
char * s;
tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
if (!tcd)
return ERR_PTR(-ENOMEM);
strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
tcd->char_device = cdev_alloc();
if (!tcd->char_device) {
rc = -ENOMEM;
goto fail_with_tcd;
}
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
tcd->char_device->dev = dev;
rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
if (rc)
goto fail_with_cdev;
tcd->class_device = device_create(tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
if (rc)
goto fail_with_cdev;
rc = sysfs_create_link(
&device->kobj,
&tcd->class_device->kobj,
tcd->mode_name
);
if (rc)
goto fail_with_class_device;
return tcd;
fail_with_class_device:
device_destroy(tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
fail_with_tcd:
kfree(tcd);
return ERR_PTR(rc);
}
EXPORT_SYMBOL(register_tape_dev);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
device_destroy(tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
}
EXPORT_SYMBOL(unregister_tape_dev);
static int __init tape_init(void)
{
tape_class = class_create(THIS_MODULE, "tape390");
return 0;
}
static void __exit tape_exit(void)
{
class_destroy(tape_class);
tape_class = NULL;
}
postcore_initcall(tape_init);
module_exit(tape_exit);
| gpl-2.0 |
arkusuma/mediapad-kernel-ics | drivers/target/target_core_tmr.c | 2377 | 13390 | /*******************************************************************************
* Filename: target_core_tmr.c
*
* This file contains SPC-3 task management infrastructure
*
* Copyright (c) 2009,2010 Rising Tide Systems
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
#include "target_core_pr.h"
#define DEBUG_LUN_RESET
#ifdef DEBUG_LUN_RESET
#define DEBUG_LR(x...) printk(KERN_INFO x)
#else
#define DEBUG_LR(x...)
#endif
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
u8 function)
{
struct se_tmr_req *tmr;
tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
}
tmr->task_cmd = se_cmd;
tmr->fabric_tmr_ptr = fabric_tmr_ptr;
tmr->function = function;
INIT_LIST_HEAD(&tmr->tmr_list);
return tmr;
}
EXPORT_SYMBOL(core_tmr_alloc_req);
void core_tmr_release_req(
struct se_tmr_req *tmr)
{
struct se_device *dev = tmr->tmr_dev;
if (!dev) {
kmem_cache_free(se_tmr_req_cache, tmr);
return;
}
spin_lock(&dev->se_tmr_lock);
list_del(&tmr->tmr_list);
spin_unlock(&dev->se_tmr_lock);
kmem_cache_free(se_tmr_req_cache, tmr);
}
static void core_tmr_handle_tas_abort(
struct se_node_acl *tmr_nacl,
struct se_cmd *cmd,
int tas,
int fe_count)
{
if (!(fe_count)) {
transport_cmd_finish_abort(cmd, 1);
return;
}
/*
* TASK ABORTED status (TAS) bit support
*/
if (((tmr_nacl != NULL) &&
(tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
transport_send_task_abort(cmd);
transport_cmd_finish_abort(cmd, 0);
}
int core_tmr_lun_reset(
struct se_device *dev,
struct se_tmr_req *tmr,
struct list_head *preempt_and_abort_list,
struct se_cmd *prout_cmd)
{
struct se_cmd *cmd;
struct se_queue_req *qr, *qr_tmp;
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
struct se_queue_obj *qobj = dev->dev_queue_obj;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_task *task, *task_tmp;
unsigned long flags;
int fe_count, state, tas;
/*
* TASK ABORTED status (TAS) bit; this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
*
* A task aborted status (TAS) bit set to zero specifies that aborted
* tasks shall be terminated by the device server without any response
* to the application client. A TAS bit set to one specifies that tasks
* aborted by the actions of an I_T nexus other than the I_T nexus on
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
tas = DEV_ATTRIB(dev)->emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
TPG_TFO(tmr_tpg)->get_fabric_name(),
tmr_nacl->initiatorname);
}
}
DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
TRANSPORT(dev)->name, tas);
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
spin_lock(&dev->se_tmr_lock);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
*/
if (tmr && (tmr_p == tmr))
continue;
cmd = tmr_p->task_cmd;
if (!(cmd)) {
printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
* If this function was called with a valid pr_res_key
* parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action),
* skip TMRs that do not match a registration key.
*/
if ((preempt_and_abort_list != NULL) &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
spin_unlock(&dev->se_tmr_lock);
spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
spin_lock(&dev->se_tmr_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
spin_lock(&dev->se_tmr_lock);
continue;
}
DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
transport_cmd_finish_abort_tmr(cmd);
spin_lock(&dev->se_tmr_lock);
}
spin_unlock(&dev->se_tmr_lock);
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* This is following sam4r17, section 5.6 Aborting commands, Table 38
* for TMR LUN_RESET:
*
* a) "Yes" indicates that each command that is aborted on an I_T nexus
* other than the one that caused the SCSI device condition is
* completed with TASK ABORTED status, if the TAS bit is set to one in
* the Control mode page (see SPC-4). "No" indicates that no status is
* returned for aborted commands.
*
* d) If the logical unit reset is caused by a particular I_T nexus
* (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
* (TASK_ABORTED status) applies.
*
* Otherwise (e.g., if triggered by a hard reset), "no"
* (no TASK_ABORTED SAM status) applies.
*
* Note that this seems to be independent of TAS (Task Aborted Status)
* in the Control Mode Page.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) {
if (!(TASK_CMD(task))) {
printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
continue;
}
cmd = TASK_CMD(task);
if (!T_TASK(cmd)) {
printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
" %p ITT: 0x%08x\n", task, cmd,
CMD_TFO(cmd)->get_task_tag(cmd));
continue;
}
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if ((preempt_and_abort_list != NULL) &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
list_del(&task->t_state_list);
atomic_set(&task->task_state_active, 0);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
"def_t_state: %d/%d cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
CMD_TFO(cmd)->get_task_tag(cmd), 0,
CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
T_TASK(cmd)->t_task_cdbs,
atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
atomic_read(&T_TASK(cmd)->t_transport_active),
atomic_read(&T_TASK(cmd)->t_transport_stop),
atomic_read(&T_TASK(cmd)->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
&T_TASK(cmd)->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
" for dev: %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
" dev: %p\n", task, dev);
spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
} else {
if (atomic_read(&task->task_execute_queue) != 0)
transport_remove_task_from_execute_queue(task, dev);
}
__transport_stop_task_timer(task, &flags);
if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
spin_unlock_irqrestore(
&T_TASK(cmd)->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
/*
* Release all commands remaining in the struct se_device cmd queue.
*
* This follows the same logic as above for the struct se_device
* struct se_task state list, where commands are returned with
* TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
* reference, otherwise the struct se_cmd is released.
*/
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
cmd = (struct se_cmd *)qr->cmd;
if (!(cmd)) {
/*
* Skip these for non PREEMPT_AND_ABORT usage..
*/
if (preempt_and_abort_list != NULL)
continue;
atomic_dec(&qobj->queue_cnt);
list_del(&qr->qr_list);
kfree(qr);
continue;
}
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if ((preempt_and_abort_list != NULL) &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
atomic_dec(&qobj->queue_cnt);
list_del(&qr->qr_list);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
state = qr->state;
kfree(qr);
DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, state,
atomic_read(&T_TASK(cmd)->t_fe_count));
/*
* Signal that the command has failed via cmd->se_cmd_flags,
* and call TFO->new_cmd_failure() to wakeup any fabric
* dependent code used to wait for unsolicited data out
* allocation to complete. The fabric module is expected
* to dump any remaining unsolicited data out for the aborted
* command at this point.
*/
transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&T_TASK(cmd)->t_fe_count));
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
}
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
if (!(preempt_and_abort_list) &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
}
spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
spin_unlock_irq(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
TRANSPORT(dev)->name);
return 0;
}
| gpl-2.0 |
motley-git/TF201-Kernel | arch/powerpc/oprofile/op_model_fsl_emb.c | 3145 | 6643 | /*
* Freescale Embedded oprofile support, based on ppc64 oprofile support
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
*
* Author: Andy Fleming
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>
static unsigned long reset_value[OP_MAX_COUNTER];
static int num_counters;
static int oprofile_running;
static inline u32 get_pmlca(int ctr)
{
u32 pmlca;
switch (ctr) {
case 0:
pmlca = mfpmr(PMRN_PMLCA0);
break;
case 1:
pmlca = mfpmr(PMRN_PMLCA1);
break;
case 2:
pmlca = mfpmr(PMRN_PMLCA2);
break;
case 3:
pmlca = mfpmr(PMRN_PMLCA3);
break;
default:
panic("Bad ctr number\n");
}
return pmlca;
}
static inline void set_pmlca(int ctr, u32 pmlca)
{
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
break;
default:
panic("Bad ctr number\n");
}
}
static inline unsigned int ctr_read(unsigned int i)
{
switch(i) {
case 0:
return mfpmr(PMRN_PMC0);
case 1:
return mfpmr(PMRN_PMC1);
case 2:
return mfpmr(PMRN_PMC2);
case 3:
return mfpmr(PMRN_PMC3);
default:
return 0;
}
}
static inline void ctr_write(unsigned int i, unsigned int val)
{
switch(i) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
break;
}
}
static void init_pmc_stop(int ctr)
{
u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
PMLCA_FCM1 | PMLCA_FCM0);
u32 pmlcb = 0;
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
mtpmr(PMRN_PMLCB0, pmlcb);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
mtpmr(PMRN_PMLCB1, pmlcb);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
mtpmr(PMRN_PMLCB2, pmlcb);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
mtpmr(PMRN_PMLCB3, pmlcb);
break;
default:
panic("Bad ctr number!\n");
}
}
static void set_pmc_event(int ctr, int event)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
((event << PMLCA_EVENT_SHIFT) &
PMLCA_EVENT_MASK);
set_pmlca(ctr, pmlca);
}
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
if(user)
pmlca &= ~PMLCA_FCU;
else
pmlca |= PMLCA_FCU;
if(kernel)
pmlca &= ~PMLCA_FCS;
else
pmlca |= PMLCA_FCS;
set_pmlca(ctr, pmlca);
}
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
u32 pmlca = get_pmlca(ctr);
if(mark0)
pmlca &= ~PMLCA_FCM0;
else
pmlca |= PMLCA_FCM0;
if(mark1)
pmlca &= ~PMLCA_FCM1;
else
pmlca |= PMLCA_FCM1;
set_pmlca(ctr, pmlca);
}
static void pmc_start_ctr(int ctr, int enable)
{
u32 pmlca = get_pmlca(ctr);
pmlca &= ~PMLCA_FC;
if (enable)
pmlca |= PMLCA_CE;
else
pmlca &= ~PMLCA_CE;
set_pmlca(ctr, pmlca);
}
static void pmc_start_ctrs(int enable)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 &= ~PMGC0_FAC;
pmgc0 |= PMGC0_FCECE;
if (enable)
pmgc0 |= PMGC0_PMIE;
else
pmgc0 &= ~PMGC0_PMIE;
mtpmr(PMRN_PMGC0, pmgc0);
}
static void pmc_stop_ctrs(void)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 |= PMGC0_FAC;
pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
mtpmr(PMRN_PMGC0, pmgc0);
}
static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
int i;
/* freeze all counters */
pmc_stop_ctrs();
for (i = 0;i < num_counters;i++) {
init_pmc_stop(i);
set_pmc_event(i, ctr[i].event);
set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
}
return 0;
}
static int fsl_emb_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int i;
num_counters = num_ctrs;
/* Our counters count up, and "count" refers to
* how much before the next interrupt, and we interrupt
* on overflow. So we calculate the starting value
* which will give us "count" until overflow.
* Then we set the events on the enabled counters */
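/*
 * Worked example (illustrative only): with ctr[i].count == 100000 the
 * counter starts at 0x80000000 - 100000; after exactly 100000 events its
 * sign bit becomes set, which is the overflow condition tested with
 * "val < 0" in fsl_emb_handle_interrupt().
 */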
for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
return 0;
}
static int fsl_emb_start(struct op_counter_config *ctr)
{
int i;
mtmsr(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
if (ctr[i].enabled) {
ctr_write(i, reset_value[i]);
/* Set each enabled counter to only
* count when the Mark bit is *not* set */
set_pmc_marked(i, 1, 0);
pmc_start_ctr(i, 1);
} else {
ctr_write(i, 0);
/* Set the ctr to be stopped */
pmc_start_ctr(i, 0);
}
}
/* Clear the freeze bit, and enable the interrupt.
* The counters won't actually start until the rfi clears
* the PMM bit */
pmc_start_ctrs(1);
oprofile_running = 1;
pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
mfpmr(PMRN_PMGC0));
return 0;
}
static void fsl_emb_stop(void)
{
/* freeze counters */
pmc_stop_ctrs();
oprofile_running = 0;
pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
mfpmr(PMRN_PMGC0));
mb();
}
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned long pc;
int is_kernel;
int val;
int i;
pc = regs->nip;
is_kernel = is_kernel_addr(pc);
for (i = 0; i < num_counters; ++i) {
val = ctr_read(i);
if (val < 0) {
if (oprofile_running && ctr[i].enabled) {
oprofile_add_ext_sample(pc, regs, i, is_kernel);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
}
}
}
/* The freeze bit was set by the interrupt. */
/* Clear the freeze bit, and reenable the interrupt. The
* counters won't actually start until the rfi clears the PMM
* bit. The PMM bit should not be set until after the interrupt
* is cleared to avoid it getting lost in some hypervisor
* environments.
*/
mtmsr(mfmsr() | MSR_PMM);
pmc_start_ctrs(1);
}
struct op_powerpc_model op_model_fsl_emb = {
.reg_setup = fsl_emb_reg_setup,
.cpu_setup = fsl_emb_cpu_setup,
.start = fsl_emb_start,
.stop = fsl_emb_stop,
.handle_interrupt = fsl_emb_handle_interrupt,
};
| gpl-2.0 |
linuxmake/kernel_softwinner_fiber | lib/mpi/mpi-gcd.c | 4937 | 1561 | /* mpi-gcd.c - MPI functions
* Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "mpi-internal.h"
/****************
* Find the greatest common divisor G of A and B.
* Return: true if the GCD is 1, false in all other cases
* (-ENOMEM on allocation failure).
*/
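/*
 * Usage sketch (hypothetical caller; variable names are illustrative).
 * A negative return means allocation failure, so check that first:
 *
 *	rc = mpi_gcd(g, e, phi);
 *	if (rc < 0)
 *		return rc;
 *	coprime = (rc == 1);
 */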
int mpi_gcd(MPI g, const MPI xa, const MPI xb)
{
MPI a = NULL, b = NULL;
if (mpi_copy(&a, xa) < 0)
goto nomem;
if (mpi_copy(&b, xb) < 0)
goto nomem;
/* TAOCP Vol II, 4.5.2, Algorithm A */
a->sign = 0;
b->sign = 0;
while (mpi_cmp_ui(b, 0)) {
if (mpi_fdiv_r(g, a, b) < 0) /* g used as temporary variable */
goto nomem;
if (mpi_set(a, b) < 0)
goto nomem;
if (mpi_set(b, g) < 0)
goto nomem;
}
if (mpi_set(g, a) < 0)
goto nomem;
mpi_free(a);
mpi_free(b);
return !mpi_cmp_ui(g, 1);
nomem:
mpi_free(a);
mpi_free(b);
return -ENOMEM;
}
| gpl-2.0 |
weevergh/android_kernel_lenovo_kiton | drivers/scsi/vmw_pvscsi.c | 4937 | 39449 | /*
* Linux driver for VMware's para-virtualized SCSI HBA.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "vmw_pvscsi.h"
#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
MODULE_AUTHOR("VMware, Inc.");
MODULE_LICENSE("GPL");
MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
#define SGL_SIZE PAGE_SIZE
struct pvscsi_sg_list {
struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
};
struct pvscsi_ctx {
/*
* The index of the context in cmd_map serves as the context ID for a
* 1-to-1 mapping of completions back to requests.
*/
struct scsi_cmnd *cmd;
struct pvscsi_sg_list *sgl;
struct list_head list;
dma_addr_t dataPA;
dma_addr_t sensePA;
dma_addr_t sglPA;
};
struct pvscsi_adapter {
char *mmioBase;
unsigned int irq;
u8 rev;
bool use_msi;
bool use_msix;
bool use_msg;
spinlock_t hw_lock;
struct workqueue_struct *workqueue;
struct work_struct work;
struct PVSCSIRingReqDesc *req_ring;
unsigned req_pages;
unsigned req_depth;
dma_addr_t reqRingPA;
struct PVSCSIRingCmpDesc *cmp_ring;
unsigned cmp_pages;
dma_addr_t cmpRingPA;
struct PVSCSIRingMsgDesc *msg_ring;
unsigned msg_pages;
dma_addr_t msgRingPA;
struct PVSCSIRingsState *rings_state;
dma_addr_t ringStatePA;
struct pci_dev *dev;
struct Scsi_Host *host;
struct list_head cmd_pool;
struct pvscsi_ctx *cmd_map;
};
/* Command line parameters */
static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg = true;
#define PVSCSI_RW (S_IRUSR | S_IWUSR)
module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
__stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
__stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
__stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
static const struct pci_device_id pvscsi_pci_tbl[] = {
{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
static struct device *
pvscsi_dev(const struct pvscsi_adapter *adapter)
{
return &(adapter->dev->dev);
}
static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
struct pvscsi_ctx *ctx, *end;
end = &adapter->cmd_map[adapter->req_depth];
for (ctx = adapter->cmd_map; ctx < end; ctx++)
if (ctx->cmd == cmd)
return ctx;
return NULL;
}
static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
struct pvscsi_ctx *ctx;
if (list_empty(&adapter->cmd_pool))
return NULL;
ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
ctx->cmd = cmd;
list_del(&ctx->list);
return ctx;
}
static void pvscsi_release_context(struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx)
{
ctx->cmd = NULL;
list_add(&ctx->list, &adapter->cmd_pool);
}
/*
* Map a pvscsi_ctx struct to a context ID field value; we map to a simple
* non-zero integer. ctx always points to an entry in the cmd_map array, hence
* the return value is always >=1.
*/
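/*
 * For example, &cmd_map[0] maps to context ID 1; pvscsi_get_context()
 * below reverses this with cmd_map[context - 1].
 */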
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
const struct pvscsi_ctx *ctx)
{
return ctx - adapter->cmd_map + 1;
}
static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
return &adapter->cmd_map[context - 1];
}
static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
u32 offset, u32 val)
{
writel(val, adapter->mmioBase + offset);
}
static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
return readl(adapter->mmioBase + offset);
}
static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}
static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
u32 val)
{
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}
static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
u32 intr_bits;
intr_bits = PVSCSI_INTR_CMPL_MASK;
if (adapter->use_msg)
intr_bits |= PVSCSI_INTR_MSG_MASK;
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}
static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}
static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
u32 cmd, const void *desc, size_t len)
{
const u32 *ptr = desc;
size_t i;
len /= sizeof(*ptr);
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
for (i = 0; i < len; i++)
pvscsi_reg_write(adapter,
PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}
static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
const struct pvscsi_ctx *ctx)
{
struct PVSCSICmdDescAbortCmd cmd = { 0 };
cmd.target = ctx->cmd->device->id;
cmd.context = pvscsi_map_context(adapter, ctx);
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}
static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}
static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
{
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}
static int scsi_is_rw(unsigned char op)
{
return op == READ_6 || op == WRITE_6 ||
op == READ_10 || op == WRITE_10 ||
op == READ_12 || op == WRITE_12 ||
op == READ_16 || op == WRITE_16;
}
static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
unsigned char op)
{
if (scsi_is_rw(op))
pvscsi_kick_rw_io(adapter);
else
pvscsi_process_request_ring(adapter);
}
static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
{
dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}
static void ll_bus_reset(const struct pvscsi_adapter *adapter)
{
dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter);
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
}
static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
{
struct PVSCSICmdDescResetDevice cmd = { 0 };
dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target);
cmd.target = target;
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
&cmd, sizeof(cmd));
}
static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
struct scatterlist *sg, unsigned count)
{
unsigned i;
struct PVSCSISGElement *sge;
BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
sge = &ctx->sgl->sge[0];
for (i = 0; i < count; i++, sg++) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].flags = 0;
}
}
/*
* Map all data buffers for a command into PCI space and
* setup the scatter/gather list if needed.
*/
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
struct PVSCSIRingReqDesc *e)
{
unsigned count;
unsigned bufflen = scsi_bufflen(cmd);
struct scatterlist *sg;
e->dataLen = bufflen;
e->dataAddr = 0;
if (bufflen == 0)
return;
sg = scsi_sglist(cmd);
count = scsi_sg_count(cmd);
if (count != 0) {
int segs = scsi_dma_map(cmd);
if (segs > 1) {
pvscsi_create_sg(ctx, sg, segs);
e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
SGL_SIZE, PCI_DMA_TODEVICE);
e->dataAddr = ctx->sglPA;
} else
e->dataAddr = sg_dma_address(sg);
} else {
/*
* In case there is no S/G list, scsi_sglist points
* directly to the buffer.
*/
ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
cmd->sc_data_direction);
e->dataAddr = ctx->dataPA;
}
}
static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx)
{
struct scsi_cmnd *cmd;
unsigned bufflen;
cmd = ctx->cmd;
bufflen = scsi_bufflen(cmd);
if (bufflen != 0) {
unsigned count = scsi_sg_count(cmd);
if (count != 0) {
scsi_dma_unmap(cmd);
if (ctx->sglPA) {
pci_unmap_single(adapter->dev, ctx->sglPA,
SGL_SIZE, PCI_DMA_TODEVICE);
ctx->sglPA = 0;
}
} else
pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
cmd->sc_data_direction);
}
if (cmd->sense_buffer)
pci_unmap_single(adapter->dev, ctx->sensePA,
SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}
static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
&adapter->ringStatePA);
if (!adapter->rings_state)
return -ENOMEM;
adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
pvscsi_ring_pages);
adapter->req_depth = adapter->req_pages
* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
adapter->req_ring = pci_alloc_consistent(adapter->dev,
adapter->req_pages * PAGE_SIZE,
&adapter->reqRingPA);
if (!adapter->req_ring)
return -ENOMEM;
adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
pvscsi_ring_pages);
adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
adapter->cmp_pages * PAGE_SIZE,
&adapter->cmpRingPA);
if (!adapter->cmp_ring)
return -ENOMEM;
BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
if (!adapter->use_msg)
return 0;
adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
pvscsi_msg_ring_pages);
adapter->msg_ring = pci_alloc_consistent(adapter->dev,
adapter->msg_pages * PAGE_SIZE,
&adapter->msgRingPA);
if (!adapter->msg_ring)
return -ENOMEM;
BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
return 0;
}
static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
struct PVSCSICmdDescSetupRings cmd = { 0 };
dma_addr_t base;
unsigned i;
cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
cmd.reqRingNumPages = adapter->req_pages;
cmd.cmpRingNumPages = adapter->cmp_pages;
base = adapter->reqRingPA;
for (i = 0; i < adapter->req_pages; i++) {
cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
base += PAGE_SIZE;
}
base = adapter->cmpRingPA;
for (i = 0; i < adapter->cmp_pages; i++) {
cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
base += PAGE_SIZE;
}
memset(adapter->rings_state, 0, PAGE_SIZE);
memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
&cmd, sizeof(cmd));
if (adapter->use_msg) {
struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
cmd_msg.numPages = adapter->msg_pages;
base = adapter->msgRingPA;
for (i = 0; i < adapter->msg_pages; i++) {
cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
base += PAGE_SIZE;
}
memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
&cmd_msg, sizeof(cmd_msg));
}
}
/*
* Pull a completion descriptor off and pass the completion back
* to the SCSI mid layer.
*/
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
const struct PVSCSIRingCmpDesc *e)
{
struct pvscsi_ctx *ctx;
struct scsi_cmnd *cmd;
u32 btstat = e->hostStatus;
u32 sdstat = e->scsiStatus;
ctx = pvscsi_get_context(adapter, e->context);
cmd = ctx->cmd;
pvscsi_unmap_buffers(adapter, ctx);
pvscsi_release_context(adapter, ctx);
cmd->result = 0;
if (sdstat != SAM_STAT_GOOD &&
(btstat == BTSTAT_SUCCESS ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
cmd->result = (DID_OK << 16) | sdstat;
if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
cmd->result |= (DRIVER_SENSE << 24);
} else
switch (btstat) {
case BTSTAT_SUCCESS:
case BTSTAT_LINKED_COMMAND_COMPLETED:
case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
/* If everything went fine, let's move on.. */
cmd->result = (DID_OK << 16);
break;
case BTSTAT_DATARUN:
case BTSTAT_DATA_UNDERRUN:
/* Report residual data in underruns */
scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
cmd->result = (DID_ERROR << 16);
break;
case BTSTAT_SELTIMEO:
/* Our emulation returns this for non-connected devs */
cmd->result = (DID_BAD_TARGET << 16);
break;
case BTSTAT_LUNMISMATCH:
case BTSTAT_TAGREJECT:
case BTSTAT_BADMSG:
cmd->result = (DRIVER_INVALID << 24);
/* fall through */
case BTSTAT_HAHARDWARE:
case BTSTAT_INVPHASE:
case BTSTAT_HATIMEOUT:
case BTSTAT_NORESPONSE:
case BTSTAT_DISCONNECT:
case BTSTAT_HASOFTWARE:
case BTSTAT_BUSFREE:
case BTSTAT_SENSFAILED:
cmd->result |= (DID_ERROR << 16);
break;
case BTSTAT_SENTRST:
case BTSTAT_RECVRST:
case BTSTAT_BUSRESET:
cmd->result = (DID_RESET << 16);
break;
case BTSTAT_ABORTQUEUE:
cmd->result = (DID_ABORT << 16);
break;
case BTSTAT_SCSIPARITY:
cmd->result = (DID_PARITY << 16);
break;
default:
cmd->result = (DID_ERROR << 16);
scmd_printk(KERN_DEBUG, cmd,
"Unknown completion status: 0x%x\n",
btstat);
}
dev_dbg(&cmd->device->sdev_gendev,
"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
cmd->scsi_done(cmd);
}
/*
* barrier usage : Since the PVSCSI device is emulated, there could be cases
* where we may want to serialize some accesses between the driver and the
* emulation layer. We use compiler barriers instead of the more expensive
* memory barriers because PVSCSI is only supported on X86 which has strong
* memory access ordering.
*/
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
struct PVSCSIRingsState *s = adapter->rings_state;
struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
u32 cmp_entries = s->cmpNumEntriesLog2;
while (s->cmpConsIdx != s->cmpProdIdx) {
struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
MASK(cmp_entries));
/*
* This barrier() ensures that *e is not dereferenced while
* the device emulation still writes data into the slot.
* Since the device emulation advances s->cmpProdIdx only after
* updating the slot we want to check it first.
*/
barrier();
pvscsi_complete_request(adapter, e);
/*
* This barrier() ensures that compiler doesn't reorder write
* to s->cmpConsIdx before the read of (*e) inside
* pvscsi_complete_request. Otherwise, device emulation may
* overwrite *e before we had a chance to read it.
*/
barrier();
s->cmpConsIdx++;
}
}
/*
* Translate a Linux SCSI request into a request ring entry.
*/
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
struct PVSCSIRingsState *s;
struct PVSCSIRingReqDesc *e;
struct scsi_device *sdev;
u32 req_entries;
s = adapter->rings_state;
sdev = cmd->device;
req_entries = s->reqNumEntriesLog2;
/*
* If this condition holds, we might have room on the request ring, but
* we might not have room on the completion ring for the response.
* However, we have already ruled out this possibility - we would not
* have successfully allocated a context if it were true, since we only
* have one context per request entry. Check for it anyway, since it
* would be a serious bug.
*/
if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
"ring full: reqProdIdx=%d cmpConsIdx=%d\n",
s->reqProdIdx, s->cmpConsIdx);
return -1;
}
e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
e->bus = sdev->channel;
e->target = sdev->id;
memset(e->lun, 0, sizeof(e->lun));
e->lun[1] = sdev->lun;
if (cmd->sense_buffer) {
ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
PCI_DMA_FROMDEVICE);
e->senseAddr = ctx->sensePA;
e->senseLen = SCSI_SENSE_BUFFERSIZE;
} else {
e->senseLen = 0;
e->senseAddr = 0;
}
e->cdbLen = cmd->cmd_len;
e->vcpuHint = smp_processor_id();
memcpy(e->cdb, cmd->cmnd, e->cdbLen);
e->tag = SIMPLE_QUEUE_TAG;
if (sdev->tagged_supported &&
(cmd->tag == HEAD_OF_QUEUE_TAG ||
cmd->tag == ORDERED_QUEUE_TAG))
e->tag = cmd->tag;
if (cmd->sc_data_direction == DMA_FROM_DEVICE)
e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
else if (cmd->sc_data_direction == DMA_TO_DEVICE)
e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
else if (cmd->sc_data_direction == DMA_NONE)
e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
else
e->flags = 0;
pvscsi_map_buffers(adapter, ctx, cmd, e);
e->context = pvscsi_map_context(adapter, ctx);
barrier();
s->reqProdIdx++;
return 0;
}
static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *host = cmd->device->host;
struct pvscsi_adapter *adapter = shost_priv(host);
struct pvscsi_ctx *ctx;
unsigned long flags;
spin_lock_irqsave(&adapter->hw_lock, flags);
ctx = pvscsi_acquire_context(adapter, cmd);
if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
if (ctx)
pvscsi_release_context(adapter, ctx);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
cmd->scsi_done = done;
dev_dbg(&cmd->device->sdev_gendev,
"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
pvscsi_kick_io(adapter, cmd->cmnd[0]);
return 0;
}
static DEF_SCSI_QCMD(pvscsi_queue)
static int pvscsi_abort(struct scsi_cmnd *cmd)
{
struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
struct pvscsi_ctx *ctx;
unsigned long flags;
scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
adapter->host->host_no, cmd);
spin_lock_irqsave(&adapter->hw_lock, flags);
/*
* Poll the completion ring first - we might be trying to abort
* a command that is waiting to be dispatched in the completion ring.
*/
pvscsi_process_completion_ring(adapter);
/*
* If there is no context for the command, it either already succeeded
* or else was never properly issued. Not our problem.
*/
ctx = pvscsi_find_context(adapter, cmd);
if (!ctx) {
scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
goto out;
}
pvscsi_abort_cmd(adapter, ctx);
pvscsi_process_completion_ring(adapter);
out:
spin_unlock_irqrestore(&adapter->hw_lock, flags);
return SUCCESS;
}
/*
* Abort all outstanding requests. This is only safe to use if the completion
* ring will never be walked again or the device has been reset, because it
* destroys the 1-1 mapping between context field passed to emulation and our
* request structure.
*/
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
unsigned i;
for (i = 0; i < adapter->req_depth; i++) {
struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
struct scsi_cmnd *cmd = ctx->cmd;
if (cmd) {
scmd_printk(KERN_ERR, cmd,
"Forced reset on cmd %p\n", cmd);
pvscsi_unmap_buffers(adapter, ctx);
pvscsi_release_context(adapter, ctx);
cmd->result = (DID_RESET << 16);
cmd->scsi_done(cmd);
}
}
}
static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct pvscsi_adapter *adapter = shost_priv(host);
unsigned long flags;
bool use_msg;
scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
spin_lock_irqsave(&adapter->hw_lock, flags);
use_msg = adapter->use_msg;
if (use_msg) {
adapter->use_msg = 0;
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/*
* Now that we know that the ISR won't add more work on the
* workqueue we can safely flush any outstanding work.
*/
flush_workqueue(adapter->workqueue);
spin_lock_irqsave(&adapter->hw_lock, flags);
}
/*
* We're going to tear down the entire ring structure and set it back
* up, so we stall new requests until all completions are flushed and
* the rings are back in place.
*/
pvscsi_process_request_ring(adapter);
ll_adapter_reset(adapter);
/*
* Now process any completions. Note we do this AFTER adapter reset,
* which is strange, but stops races where completions get posted
* between processing the ring and issuing the reset. The backend will
* not touch the ring memory after reset, so the immediately pre-reset
* completion ring state is still valid.
*/
pvscsi_process_completion_ring(adapter);
pvscsi_reset_all(adapter);
adapter->use_msg = use_msg;
pvscsi_setup_all_rings(adapter);
pvscsi_unmask_intr(adapter);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
return SUCCESS;
}
static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct pvscsi_adapter *adapter = shost_priv(host);
unsigned long flags;
scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
/*
* We don't want to queue new requests for this bus after
* flushing all pending requests to emulation, since new
* requests could then sneak in during this bus reset phase,
* so take the lock now.
*/
spin_lock_irqsave(&adapter->hw_lock, flags);
pvscsi_process_request_ring(adapter);
ll_bus_reset(adapter);
pvscsi_process_completion_ring(adapter);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
return SUCCESS;
}
static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct pvscsi_adapter *adapter = shost_priv(host);
unsigned long flags;
scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
host->host_no, cmd->device->id);
/*
* We don't want to queue new requests for this device after flushing
* all pending requests to emulation, since new requests could then
* sneak in during this device reset phase, so take the lock now.
*/
spin_lock_irqsave(&adapter->hw_lock, flags);
pvscsi_process_request_ring(adapter);
ll_device_reset(adapter, cmd->device->id);
pvscsi_process_completion_ring(adapter);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
return SUCCESS;
}
static struct scsi_host_template pvscsi_template;
static const char *pvscsi_info(struct Scsi_Host *host)
{
struct pvscsi_adapter *adapter = shost_priv(host);
static char buf[256];
sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
pvscsi_template.cmd_per_lun);
return buf;
}
static struct scsi_host_template pvscsi_template = {
.module = THIS_MODULE,
.name = "VMware PVSCSI Host Adapter",
.proc_name = "vmw_pvscsi",
.info = pvscsi_info,
.queuecommand = pvscsi_queue,
.this_id = -1,
.sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
.dma_boundary = UINT_MAX,
.max_sectors = 0xffff,
.use_clustering = ENABLE_CLUSTERING,
.eh_abort_handler = pvscsi_abort,
.eh_device_reset_handler = pvscsi_device_reset,
.eh_bus_reset_handler = pvscsi_bus_reset,
.eh_host_reset_handler = pvscsi_host_reset,
};
static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
const struct PVSCSIRingMsgDesc *e)
{
struct PVSCSIRingsState *s = adapter->rings_state;
struct Scsi_Host *host = adapter->host;
struct scsi_device *sdev;
printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
if (e->type == PVSCSI_MSG_DEV_ADDED) {
struct PVSCSIMsgDescDevStatusChanged *desc;
desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
printk(KERN_INFO
"vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
desc->bus, desc->target, desc->lun[1]);
if (!scsi_host_get(host))
return;
sdev = scsi_device_lookup(host, desc->bus, desc->target,
desc->lun[1]);
if (sdev) {
printk(KERN_INFO "vmw_pvscsi: device already exists\n");
scsi_device_put(sdev);
} else
scsi_add_device(adapter->host, desc->bus,
desc->target, desc->lun[1]);
scsi_host_put(host);
} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
struct PVSCSIMsgDescDevStatusChanged *desc;
desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
printk(KERN_INFO
"vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
desc->bus, desc->target, desc->lun[1]);
if (!scsi_host_get(host))
return;
sdev = scsi_device_lookup(host, desc->bus, desc->target,
desc->lun[1]);
if (sdev) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
} else
printk(KERN_INFO
"vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
desc->bus, desc->target, desc->lun[1]);
scsi_host_put(host);
}
}
static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
struct PVSCSIRingsState *s = adapter->rings_state;
return s->msgProdIdx != s->msgConsIdx;
}
static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
struct PVSCSIRingsState *s = adapter->rings_state;
struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
u32 msg_entries = s->msgNumEntriesLog2;
while (pvscsi_msg_pending(adapter)) {
struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
MASK(msg_entries));
barrier();
pvscsi_process_msg(adapter, e);
barrier();
s->msgConsIdx++;
}
}
static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
struct pvscsi_adapter *adapter;
adapter = container_of(data, struct pvscsi_adapter, work);
pvscsi_process_msg_ring(adapter);
}
static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
char name[32];
if (!pvscsi_use_msg)
return 0;
pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
PVSCSI_CMD_SETUP_MSG_RING);
if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
return 0;
snprintf(name, sizeof(name),
"vmw_pvscsi_wq_%u", adapter->host->host_no);
adapter->workqueue = create_singlethread_workqueue(name);
if (!adapter->workqueue) {
printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
return 0;
}
INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
return 1;
}
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
struct pvscsi_adapter *adapter = devp;
int handled;
if (adapter->use_msi || adapter->use_msix)
handled = true;
else {
u32 val = pvscsi_read_intr_status(adapter);
handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
if (handled)
pvscsi_write_intr_status(devp, val);
}
if (handled) {
unsigned long flags;
spin_lock_irqsave(&adapter->hw_lock, flags);
pvscsi_process_completion_ring(adapter);
if (adapter->use_msg && pvscsi_msg_pending(adapter))
queue_work(adapter->workqueue, &adapter->work);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
}
return IRQ_RETVAL(handled);
}
static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
{
struct pvscsi_ctx *ctx = adapter->cmd_map;
unsigned i;
for (i = 0; i < adapter->req_depth; ++i, ++ctx)
free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}
static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
unsigned int *irq)
{
struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
int ret;
ret = pci_enable_msix(adapter->dev, &entry, 1);
if (ret)
return ret;
*irq = entry.vector;
return 0;
}
static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
if (adapter->irq) {
free_irq(adapter->irq, adapter);
adapter->irq = 0;
}
if (adapter->use_msi) {
pci_disable_msi(adapter->dev);
adapter->use_msi = 0;
} else if (adapter->use_msix) {
pci_disable_msix(adapter->dev);
adapter->use_msix = 0;
}
}
static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
pvscsi_shutdown_intr(adapter);
if (adapter->workqueue)
destroy_workqueue(adapter->workqueue);
if (adapter->mmioBase)
pci_iounmap(adapter->dev, adapter->mmioBase);
pci_release_regions(adapter->dev);
if (adapter->cmd_map) {
pvscsi_free_sgls(adapter);
kfree(adapter->cmd_map);
}
if (adapter->rings_state)
pci_free_consistent(adapter->dev, PAGE_SIZE,
adapter->rings_state, adapter->ringStatePA);
if (adapter->req_ring)
pci_free_consistent(adapter->dev,
adapter->req_pages * PAGE_SIZE,
adapter->req_ring, adapter->reqRingPA);
if (adapter->cmp_ring)
pci_free_consistent(adapter->dev,
adapter->cmp_pages * PAGE_SIZE,
adapter->cmp_ring, adapter->cmpRingPA);
if (adapter->msg_ring)
pci_free_consistent(adapter->dev,
adapter->msg_pages * PAGE_SIZE,
adapter->msg_ring, adapter->msgRingPA);
}
/*
* Allocate scatter gather lists.
*
* These are statically allocated. Trying to be clever was not worth it.
*
* Dynamic allocation can fail, and we can't go deep into the memory
* allocator, since we're a SCSI driver, and trying too hard to allocate
* memory might generate disk I/O. We also don't want to fail disk I/O
* in that case because we can't get an allocation - the I/O could be
* trying to swap out data to free memory. Since that is pathological,
* just use a statically allocated scatter list.
*
*/
static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
struct pvscsi_ctx *ctx;
int i;
ctx = adapter->cmd_map;
BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
get_order(SGL_SIZE));
ctx->sglPA = 0;
BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
if (!ctx->sgl) {
for (; i >= 0; --i, --ctx) {
free_pages((unsigned long)ctx->sgl,
get_order(SGL_SIZE));
ctx->sgl = NULL;
}
return -ENOMEM;
}
}
return 0;
}
/*
* Query the device, fetch the config info and return the
* maximum number of targets on the adapter. In case of
* failure for any reason, return the default, i.e. 16.
*/
static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
{
struct PVSCSICmdDescConfigCmd cmd;
struct PVSCSIConfigPageHeader *header;
struct device *dev;
dma_addr_t configPagePA;
void *config_page;
u32 numPhys = 16;
dev = pvscsi_dev(adapter);
config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
&configPagePA);
if (!config_page) {
dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
goto exit;
}
BUG_ON(configPagePA & ~PAGE_MASK);
/* Fetch config info from the device. */
cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
cmd.cmpAddr = configPagePA;
cmd._pad = 0;
/*
* Mark the completion page header with error values. If the device
* completes the command successfully, it sets the status values to
* indicate success.
*/
header = config_page;
memset(header, 0, sizeof *header);
header->hostStatus = BTSTAT_INVPARAM;
header->scsiStatus = SDSTAT_CHECK;
pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
if (header->hostStatus == BTSTAT_SUCCESS &&
header->scsiStatus == SDSTAT_GOOD) {
struct PVSCSIConfigPageController *config;
config = config_page;
numPhys = config->numPhys;
} else
dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
header->hostStatus, header->scsiStatus);
pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
exit:
return numPhys;
}
static int __devinit pvscsi_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
struct Scsi_Host *host;
struct device *dev;
unsigned int i;
unsigned long flags = 0;
int error;
error = -ENODEV;
if (pci_enable_device(pdev))
return error;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
} else {
printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
goto out_disable_device;
}
pvscsi_template.can_queue =
min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
pvscsi_template.cmd_per_lun =
min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
if (!host) {
printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
goto out_disable_device;
}
adapter = shost_priv(host);
memset(adapter, 0, sizeof(*adapter));
adapter->dev = pdev;
adapter->host = host;
spin_lock_init(&adapter->hw_lock);
host->max_channel = 0;
host->max_id = 16;
host->max_lun = 1;
host->max_cmd_len = 16;
adapter->rev = pdev->revision;
if (pci_request_regions(pdev, "vmw_pvscsi")) {
printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
goto out_free_host;
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
continue;
if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
continue;
break;
}
if (i == DEVICE_COUNT_RESOURCE) {
printk(KERN_ERR
"vmw_pvscsi: adapter has no suitable MMIO region\n");
goto out_release_resources;
}
adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
if (!adapter->mmioBase) {
printk(KERN_ERR
"vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
i, PVSCSI_MEM_SPACE_SIZE);
goto out_release_resources;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, host);
ll_adapter_reset(adapter);
adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
error = pvscsi_allocate_rings(adapter);
if (error) {
printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
goto out_release_resources;
}
/*
* Ask the device for max number of targets.
*/
host->max_id = pvscsi_get_max_targets(adapter);
dev = pvscsi_dev(adapter);
dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
/*
* From this point on we should reset the adapter if anything goes
* wrong.
*/
pvscsi_setup_all_rings(adapter);
adapter->cmd_map = kcalloc(adapter->req_depth,
sizeof(struct pvscsi_ctx), GFP_KERNEL);
if (!adapter->cmd_map) {
printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
error = -ENOMEM;
goto out_reset_adapter;
}
INIT_LIST_HEAD(&adapter->cmd_pool);
for (i = 0; i < adapter->req_depth; i++) {
struct pvscsi_ctx *ctx = adapter->cmd_map + i;
list_add(&ctx->list, &adapter->cmd_pool);
}
error = pvscsi_allocate_sg(adapter);
if (error) {
printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
goto out_reset_adapter;
}
if (!pvscsi_disable_msix &&
pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
adapter->use_msix = 1;
} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
printk(KERN_INFO "vmw_pvscsi: using MSI\n");
adapter->use_msi = 1;
adapter->irq = pdev->irq;
} else {
printk(KERN_INFO "vmw_pvscsi: using INTx\n");
adapter->irq = pdev->irq;
flags = IRQF_SHARED;
}
error = request_irq(adapter->irq, pvscsi_isr, flags,
"vmw_pvscsi", adapter);
if (error) {
printk(KERN_ERR
"vmw_pvscsi: unable to request IRQ: %d\n", error);
adapter->irq = 0;
goto out_reset_adapter;
}
error = scsi_add_host(host, &pdev->dev);
if (error) {
printk(KERN_ERR
"vmw_pvscsi: scsi_add_host failed: %d\n", error);
goto out_reset_adapter;
}
dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
adapter->rev, host->host_no);
pvscsi_unmask_intr(adapter);
scsi_scan_host(host);
return 0;
out_reset_adapter:
ll_adapter_reset(adapter);
out_release_resources:
pvscsi_release_resources(adapter);
out_free_host:
scsi_host_put(host);
out_disable_device:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return error;
}
static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
pvscsi_mask_intr(adapter);
if (adapter->workqueue)
flush_workqueue(adapter->workqueue);
pvscsi_shutdown_intr(adapter);
pvscsi_process_request_ring(adapter);
pvscsi_process_completion_ring(adapter);
ll_adapter_reset(adapter);
}
static void pvscsi_shutdown(struct pci_dev *dev)
{
struct Scsi_Host *host = pci_get_drvdata(dev);
struct pvscsi_adapter *adapter = shost_priv(host);
__pvscsi_shutdown(adapter);
}
static void pvscsi_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct pvscsi_adapter *adapter = shost_priv(host);
scsi_remove_host(host);
__pvscsi_shutdown(adapter);
pvscsi_release_resources(adapter);
scsi_host_put(host);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
static struct pci_driver pvscsi_pci_driver = {
.name = "vmw_pvscsi",
.id_table = pvscsi_pci_tbl,
.probe = pvscsi_probe,
.remove = __devexit_p(pvscsi_remove),
.shutdown = pvscsi_shutdown,
};
static int __init pvscsi_init(void)
{
pr_info("%s - version %s\n",
PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
return pci_register_driver(&pvscsi_pci_driver);
}
static void __exit pvscsi_exit(void)
{
pci_unregister_driver(&pvscsi_pci_driver);
}
module_init(pvscsi_init);
module_exit(pvscsi_exit);
| gpl-2.0 |
moguriso/isw11sc-kernel | net/ipv6/xfrm6_tunnel.c | 7497 | 10167 | /*
* Copyright (C)2003,2004 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors Mitsuru KANDA <mk@linux-ipv6.org>
* YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*
* Based on net/ipv4/xfrm4_tunnel.c
*
*/
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
#include <net/netns/generic.h>
#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
#define XFRM6_TUNNEL_SPI_MIN 1
#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
struct xfrm6_tunnel_net {
struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
u32 spi;
};
static int xfrm6_tunnel_net_id __read_mostly;
static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
{
return net_generic(net, xfrm6_tunnel_net_id);
}
/*
* xfrm6_tunnel_spi entries are used to allocate a unique id ("spi")
* per xfrm_address_t.
*/
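/*
 * Roughly: each entry sits on two hash tables at once - spi_byaddr (keyed
 * by the tunnel endpoint address) and spi_byspi (keyed by the allocated
 * SPI). Lookups are RCU-protected, writers serialize on
 * xfrm6_tunnel_spi_lock, and each entry is reference counted so repeated
 * allocations for the same address return the same SPI.
 */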
struct xfrm6_tunnel_spi {
struct hlist_node list_byaddr;
struct hlist_node list_byspi;
xfrm_address_t addr;
u32 spi;
atomic_t refcnt;
struct rcu_head rcu_head;
};
static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
{
unsigned h;
h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
h ^= h >> 16;
h ^= h >> 8;
h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
return h;
}
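/*
 * Illustration: the four 32-bit words of the address are XORed together,
 * the upper half and next byte are folded down, and the result is masked
 * to the low bits, giving a bucket index in [0, XFRM6_TUNNEL_SPI_BYADDR_HSIZE).
 */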
static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos;
hlist_for_each_entry_rcu(x6spi, pos,
&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr) {
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
return x6spi;
}
return NULL;
}
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
rcu_read_lock_bh();
x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
spi = x6spi ? x6spi->spi : 0;
rcu_read_unlock_bh();
return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
int index = xfrm6_tunnel_spi_hash_byspi(spi);
struct hlist_node *pos;
hlist_for_each_entry(x6spi, pos,
&xfrm6_tn->spi_byspi[index],
list_byspi) {
if (x6spi->spi == spi)
return -1;
}
return index;
}
static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
u32 spi;
struct xfrm6_tunnel_spi *x6spi;
int index;
if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
else
xfrm6_tn->spi++;
for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
}
for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
}
spi = 0;
goto out;
alloc_spi:
xfrm6_tn->spi = spi;
x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
if (!x6spi)
goto out;
memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
x6spi->spi = spi;
atomic_set(&x6spi->refcnt, 1);
hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);
index = xfrm6_tunnel_spi_hash_byaddr(saddr);
hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
out:
return spi;
}
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
spin_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
if (x6spi) {
atomic_inc(&x6spi->refcnt);
spi = x6spi->spi;
} else
spi = __xfrm6_tunnel_alloc_spi(net, saddr);
spin_unlock_bh(&xfrm6_tunnel_spi_lock);
return htonl(spi);
}
EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
static void x6spi_destroy_rcu(struct rcu_head *head)
{
kmem_cache_free(xfrm6_tunnel_spi_kmem,
container_of(head, struct xfrm6_tunnel_spi, rcu_head));
}
static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos, *n;
spin_lock_bh(&xfrm6_tunnel_spi_lock);
hlist_for_each_entry_safe(x6spi, pos, n,
&xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr)
{
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
if (atomic_dec_and_test(&x6spi->refcnt)) {
hlist_del_rcu(&x6spi->list_byaddr);
hlist_del_rcu(&x6spi->list_byspi);
call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
break;
}
}
}
spin_unlock_bh(&xfrm6_tunnel_spi_lock);
}
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
skb_push(skb, -skb_network_offset(skb));
return 0;
}
static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
return skb_network_header(skb)[IP6CB(skb)->nhoff];
}
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct ipv6hdr *iph = ipv6_hdr(skb);
__be32 spi;
spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
/* xfrm6_tunnel native err handling */
switch (type) {
case ICMPV6_DEST_UNREACH:
switch (code) {
case ICMPV6_NOROUTE:
case ICMPV6_ADM_PROHIBITED:
case ICMPV6_NOT_NEIGHBOUR:
case ICMPV6_ADDR_UNREACH:
case ICMPV6_PORT_UNREACH:
default:
break;
}
break;
case ICMPV6_PKT_TOOBIG:
break;
case ICMPV6_TIME_EXCEED:
switch (code) {
case ICMPV6_EXC_HOPLIMIT:
break;
case ICMPV6_EXC_FRAGTIME:
default:
break;
}
break;
case ICMPV6_PARAMPROB:
switch (code) {
case ICMPV6_HDR_FIELD: break;
case ICMPV6_UNK_NEXTHDR: break;
case ICMPV6_UNK_OPTION: break;
}
break;
default:
break;
}
return 0;
}
static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
if (x->props.mode != XFRM_MODE_TUNNEL)
return -EINVAL;
if (x->encap)
return -EINVAL;
x->props.header_len = sizeof(struct ipv6hdr);
return 0;
}
static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
struct net *net = xs_net(x);
xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
}
static const struct xfrm_type xfrm6_tunnel_type = {
.description = "IP6IP6",
.owner = THIS_MODULE,
.proto = IPPROTO_IPV6,
.init_state = xfrm6_tunnel_init_state,
.destructor = xfrm6_tunnel_destroy,
.input = xfrm6_tunnel_input,
.output = xfrm6_tunnel_output,
};
static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
};
static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
};
static int __net_init xfrm6_tunnel_net_init(struct net *net)
{
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
unsigned int i;
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
xfrm6_tn->spi = 0;
return 0;
}
static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
{
}
static struct pernet_operations xfrm6_tunnel_net_ops = {
.init = xfrm6_tunnel_net_init,
.exit = xfrm6_tunnel_net_exit,
.id = &xfrm6_tunnel_net_id,
.size = sizeof(struct xfrm6_tunnel_net),
};
static int __init xfrm6_tunnel_init(void)
{
int rv;
xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
sizeof(struct xfrm6_tunnel_spi),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
if (rv < 0)
goto out_pernet;
rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
if (rv < 0)
goto out_type;
rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
if (rv < 0)
goto out_xfrm6;
rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
if (rv < 0)
goto out_xfrm46;
return 0;
out_xfrm46:
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
out_xfrm6:
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
out_type:
unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
out_pernet:
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
return rv;
}
static void __exit xfrm6_tunnel_fini(void)
{
xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}
module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);
| gpl-2.0 |
Fusion-Devices/android_kernel_cyanogen_msm8916 | drivers/media/dvb-frontends/stb0899_algo.c | 7497 | 50478 | /*
STB0899 Multistandard Frontend driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
Copyright (C) ST Microelectronics
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "stb0899_drv.h"
#include "stb0899_priv.h"
#include "stb0899_reg.h"
static inline u32 stb0899_do_div(u64 n, u32 d)
{
/* wrap do_div() for ease of use */
do_div(n, d);
return n;
}
#if 0
/* These functions are currently unused */
/*
* stb0899_calc_srate
* Compute symbol rate
*/
static u32 stb0899_calc_srate(u32 master_clk, u8 *sfr)
{
u64 tmp;
/* srate = (SFR * master_clk) >> 20 */
/* sfr is of size 20 bit, stored with an offset of 4 bit */
tmp = (((u32)sfr[0]) << 16) | (((u32)sfr[1]) << 8) | sfr[2];
tmp &= ~0xf;
tmp *= master_clk;
tmp >>= 24;
return tmp;
}
/*
* stb0899_get_srate
* Get the current symbol rate
*/
static u32 stb0899_get_srate(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
u8 sfr[3];
stb0899_read_regs(state, STB0899_SFRH, sfr, 3);
return stb0899_calc_srate(internal->master_clk, sfr);
}
#endif
/*
* stb0899_set_srate
* Set symbol frequency
* MasterClock: master clock frequency (hz)
* SymbolRate: symbol rate (bauds)
* return symbol frequency
*/
static u32 stb0899_set_srate(struct stb0899_state *state, u32 master_clk, u32 srate)
{
u32 tmp;
u8 sfr[3];
dprintk(state->verbose, FE_DEBUG, 1, "-->");
/*
* In order to have maximum precision, the symbol rate entered into
* the chip is computed as the value closest to the "true value".
* For this purpose, the symbol rate value is rounded (1 is added on the bit
* below the LSB)
*
* srate = (SFR * master_clk) >> 20
* <=>
* SFR = srate << 20 / master_clk
*
* rounded:
* SFR = (srate << 21 + master_clk) / (2 * master_clk)
*
* stored as 20 bit number with an offset of 4 bit:
* sfr = SFR << 4;
*/
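/*
 * Worked example (illustrative values only): with master_clk = 99 MHz and
 * srate = 27.5 Mbaud,
 *   SFR = ((27500000 << 21) + 99000000) / (2 * 99000000) ~= 291271
 * which fits in 20 bits; shifted left by 4 it becomes 0x471C70, so the
 * bytes written below are sfr[0] = 0x47, sfr[1] = 0x1C, sfr[2] = 0x70.
 */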
tmp = stb0899_do_div((((u64)srate) << 21) + master_clk, 2 * master_clk);
tmp <<= 4;
sfr[0] = tmp >> 16;
sfr[1] = tmp >> 8;
sfr[2] = tmp;
stb0899_write_regs(state, STB0899_SFRH, sfr, 3);
return srate;
}
/*
* stb0899_calc_derot_time
* Compute the amount of time needed by the derotator to lock
* SymbolRate: Symbol rate
* return: derotator time constant (ms)
*/
static long stb0899_calc_derot_time(long srate)
{
if (srate > 0)
return (100000 / (srate / 1000));
else
return 0;
}
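/*
 * Example (illustrative): srate = 27.5 Mbaud gives 100000 / 27500 = 3 ms,
 * while srate = 5 Mbaud gives 100000 / 5000 = 20 ms.
 */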
/*
* stb0899_carr_width
* Compute the width of the carrier
* return: width of carrier (kHz or MHz)
*/
long stb0899_carr_width(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
return (internal->srate + (internal->srate * internal->rolloff) / 100);
}
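/*
 * Example (illustrative): with srate = 27.5 Mbaud and rolloff = 35
 * (i.e. 0.35), the carrier width is
 * 27500000 + (27500000 * 35) / 100 = 37125000.
 */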
/*
* stb0899_first_subrange
* Compute the first subrange of the search
*/
static void stb0899_first_subrange(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_params *params = &state->params;
struct stb0899_config *config = state->config;
int range = 0;
u32 bandwidth = 0;
if (config->tuner_get_bandwidth) {
stb0899_i2c_gate_ctrl(&state->frontend, 1);
config->tuner_get_bandwidth(&state->frontend, &bandwidth);
stb0899_i2c_gate_ctrl(&state->frontend, 0);
range = bandwidth - stb0899_carr_width(state) / 2;
}
if (range > 0)
internal->sub_range = min(internal->srch_range, range);
else
internal->sub_range = 0;
internal->freq = params->freq;
internal->tuner_offst = 0L;
internal->sub_dir = 1;
}
/*
* stb0899_check_tmg
* check for timing lock
* internal.Ttiming: time to wait for loop lock
*/
static enum stb0899_status stb0899_check_tmg(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
int lock;
u8 reg;
s8 timing;
msleep(internal->t_derot);
stb0899_write_reg(state, STB0899_RTF, 0xf2);
reg = stb0899_read_reg(state, STB0899_TLIR);
lock = STB0899_GETFIELD(TLIR_TMG_LOCK_IND, reg);
timing = stb0899_read_reg(state, STB0899_RTF);
if (lock >= 42) {
if ((lock > 48) && (abs(timing) >= 110)) {
internal->status = ANALOGCARRIER;
dprintk(state->verbose, FE_DEBUG, 1, "-->ANALOG Carrier !");
} else {
internal->status = TIMINGOK;
dprintk(state->verbose, FE_DEBUG, 1, "------->TIMING OK !");
}
} else {
internal->status = NOTIMING;
dprintk(state->verbose, FE_DEBUG, 1, "-->NO TIMING !");
}
return internal->status;
}
/*
* stb0899_search_tmg
* perform a fs/2 zig-zag to find timing
*/
static enum stb0899_status stb0899_search_tmg(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_params *params = &state->params;
short int derot_step, derot_freq = 0, derot_limit, next_loop = 3;
int index = 0;
u8 cfr[2];
internal->status = NOTIMING;
/* timing loop computation & symbol rate optimisation */
derot_limit = (internal->sub_range / 2L) / internal->mclk;
derot_step = (params->srate / 2L) / internal->mclk;
while ((stb0899_check_tmg(state) != TIMINGOK) && next_loop) {
index++;
derot_freq += index * internal->direction * derot_step; /* next derot zig zag position */
if (abs(derot_freq) > derot_limit)
next_loop--;
if (next_loop) {
STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(state->config->inversion * derot_freq));
STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(state->config->inversion * derot_freq));
stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
}
internal->direction = -internal->direction; /* Change zigzag direction */
}
if (internal->status == TIMINGOK) {
stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
internal->derot_freq = state->config->inversion * MAKEWORD16(cfr[0], cfr[1]);
dprintk(state->verbose, FE_DEBUG, 1, "------->TIMING OK ! Derot Freq = %d", internal->derot_freq);
}
return internal->status;
}
/*
* stb0899_check_carrier
* Check for carrier found
*/
static enum stb0899_status stb0899_check_carrier(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
u8 reg;
msleep(internal->t_derot); /* wait for derotator ok */
reg = stb0899_read_reg(state, STB0899_CFD);
STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
stb0899_write_reg(state, STB0899_CFD, reg);
reg = stb0899_read_reg(state, STB0899_DSTATUS);
dprintk(state->verbose, FE_DEBUG, 1, "--------------------> STB0899_DSTATUS=[0x%02x]", reg);
if (STB0899_GETFIELD(CARRIER_FOUND, reg)) {
internal->status = CARRIEROK;
dprintk(state->verbose, FE_DEBUG, 1, "-------------> CARRIEROK !");
} else {
internal->status = NOCARRIER;
dprintk(state->verbose, FE_DEBUG, 1, "-------------> NOCARRIER !");
}
return internal->status;
}
/*
* stb0899_search_carrier
* Search for a QPSK carrier with the derotator
*/
static enum stb0899_status stb0899_search_carrier(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
short int derot_freq = 0, last_derot_freq = 0, derot_limit, next_loop = 3;
int index = 0;
u8 cfr[2];
u8 reg;
internal->status = NOCARRIER;
derot_limit = (internal->sub_range / 2L) / internal->mclk;
derot_freq = internal->derot_freq;
reg = stb0899_read_reg(state, STB0899_CFD);
STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
stb0899_write_reg(state, STB0899_CFD, reg);
do {
dprintk(state->verbose, FE_DEBUG, 1, "Derot Freq=%d, mclk=%d", derot_freq, internal->mclk);
if (stb0899_check_carrier(state) == NOCARRIER) {
index++;
last_derot_freq = derot_freq;
derot_freq += index * internal->direction * internal->derot_step; /* next zig zag derotator position */
if (abs(derot_freq) > derot_limit)
next_loop--;
if (next_loop) {
reg = stb0899_read_reg(state, STB0899_CFD);
STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
stb0899_write_reg(state, STB0899_CFD, reg);
STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(state->config->inversion * derot_freq));
STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(state->config->inversion * derot_freq));
stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
}
}
internal->direction = -internal->direction; /* Change zigzag direction */
} while ((internal->status != CARRIEROK) && next_loop);
if (internal->status == CARRIEROK) {
stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
internal->derot_freq = state->config->inversion * MAKEWORD16(cfr[0], cfr[1]);
dprintk(state->verbose, FE_DEBUG, 1, "----> CARRIER OK !, Derot Freq=%d", internal->derot_freq);
} else {
internal->derot_freq = last_derot_freq;
}
return internal->status;
}
/*
* stb0899_check_data
* Check for data found
*/
static enum stb0899_status stb0899_check_data(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_params *params = &state->params;
int lock = 0, index = 0, dataTime = 500, loop;
u8 reg;
internal->status = NODATA;
/* RESET FEC */
reg = stb0899_read_reg(state, STB0899_TSTRES);
STB0899_SETFIELD_VAL(FRESACS, reg, 1);
stb0899_write_reg(state, STB0899_TSTRES, reg);
msleep(1);
reg = stb0899_read_reg(state, STB0899_TSTRES);
STB0899_SETFIELD_VAL(FRESACS, reg, 0);
stb0899_write_reg(state, STB0899_TSTRES, reg);
if (params->srate <= 2000000)
dataTime = 2000;
else if (params->srate <= 5000000)
dataTime = 1500;
else if (params->srate <= 15000000)
dataTime = 1000;
else
dataTime = 500;
/* clear previous failed END_LOOPVIT */
stb0899_read_reg(state, STB0899_VSTATUS);
stb0899_write_reg(state, STB0899_DSTATUS2, 0x00); /* force search loop */
while (1) {
/* WARNING! VIT LOCKED has to be tested before VIT_END_LOOP */
reg = stb0899_read_reg(state, STB0899_VSTATUS);
lock = STB0899_GETFIELD(VSTATUS_LOCKEDVIT, reg);
loop = STB0899_GETFIELD(VSTATUS_END_LOOPVIT, reg);
if (lock || loop || (index > dataTime))
break;
index++;
}
if (lock) { /* DATA LOCK indicator */
internal->status = DATAOK;
dprintk(state->verbose, FE_DEBUG, 1, "-----------------> DATA OK !");
}
return internal->status;
}
/*
* stb0899_search_data
* Search for a QPSK carrier with the derotator
*/
static enum stb0899_status stb0899_search_data(struct stb0899_state *state)
{
short int derot_freq, derot_step, derot_limit, next_loop = 3;
u8 cfr[2];
u8 reg;
int index = 1;
struct stb0899_internal *internal = &state->internal;
struct stb0899_params *params = &state->params;
derot_step = (params->srate / 4L) / internal->mclk;
derot_limit = (internal->sub_range / 2L) / internal->mclk;
derot_freq = internal->derot_freq;
do {
if ((internal->status != CARRIEROK) || (stb0899_check_data(state) != DATAOK)) {
derot_freq += index * internal->direction * derot_step; /* next zig zag derotator position */
if (abs(derot_freq) > derot_limit)
next_loop--;
if (next_loop) {
dprintk(state->verbose, FE_DEBUG, 1, "Derot freq=%d, mclk=%d", derot_freq, internal->mclk);
reg = stb0899_read_reg(state, STB0899_CFD);
STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
stb0899_write_reg(state, STB0899_CFD, reg);
STB0899_SETFIELD_VAL(CFRM, cfr[0], MSB(state->config->inversion * derot_freq));
STB0899_SETFIELD_VAL(CFRL, cfr[1], LSB(state->config->inversion * derot_freq));
stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* derotator frequency */
stb0899_check_carrier(state);
index++;
}
}
internal->direction = -internal->direction; /* change zig zag direction */
} while ((internal->status != DATAOK) && next_loop);
if (internal->status == DATAOK) {
stb0899_read_regs(state, STB0899_CFRM, cfr, 2); /* get derotator frequency */
internal->derot_freq = state->config->inversion * MAKEWORD16(cfr[0], cfr[1]);
dprintk(state->verbose, FE_DEBUG, 1, "------> DATAOK ! Derot Freq=%d", internal->derot_freq);
}
return internal->status;
}
/*
* stb0899_check_range
* check if the found frequency is in the correct range
*/
static enum stb0899_status stb0899_check_range(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_params *params = &state->params;
int range_offst, tp_freq;
range_offst = internal->srch_range / 2000;
tp_freq = internal->freq + (internal->derot_freq * internal->mclk) / 1000;
if ((tp_freq >= params->freq - range_offst) && (tp_freq <= params->freq + range_offst)) {
internal->status = RANGEOK;
dprintk(state->verbose, FE_DEBUG, 1, "----> RANGEOK !");
} else {
internal->status = OUTOFRANGE;
dprintk(state->verbose, FE_DEBUG, 1, "----> OUT OF RANGE !");
}
return internal->status;
}
/*
* NextSubRange
* Compute the next subrange of the search
*/
static void next_sub_range(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_params *params = &state->params;
long old_sub_range;
if (internal->sub_dir > 0) {
old_sub_range = internal->sub_range;
internal->sub_range = min((internal->srch_range / 2) -
(internal->tuner_offst + internal->sub_range / 2),
internal->sub_range);
if (internal->sub_range < 0)
internal->sub_range = 0;
internal->tuner_offst += (old_sub_range + internal->sub_range) / 2;
}
internal->freq = params->freq + (internal->sub_dir * internal->tuner_offst) / 1000;
internal->sub_dir = -internal->sub_dir;
}
/*
* stb0899_dvbs_algo
* Search for a signal, timing, carrier and data for a
* given frequency in a given range
*/
enum stb0899_status stb0899_dvbs_algo(struct stb0899_state *state)
{
struct stb0899_params *params = &state->params;
struct stb0899_internal *internal = &state->internal;
struct stb0899_config *config = state->config;
u8 bclc, reg;
u8 cfr[2];
u8 eq_const[10];
s32 clnI = 3;
u32 bandwidth = 0;
/* BETA values rated @ 99MHz */
s32 betaTab[5][4] = {
/* 5 10 20 30MBps */
{ 37, 34, 32, 31 }, /* QPSK 1/2 */
{ 37, 35, 33, 31 }, /* QPSK 2/3 */
{ 37, 35, 33, 31 }, /* QPSK 3/4 */
{ 37, 36, 33, 32 }, /* QPSK 5/6 */
{ 37, 36, 33, 32 } /* QPSK 7/8 */
};
internal->direction = 1;
stb0899_set_srate(state, internal->master_clk, params->srate);
/* Carrier loop optimization versus symbol rate for acquisition*/
if (params->srate <= 5000000) {
stb0899_write_reg(state, STB0899_ACLC, 0x89);
bclc = stb0899_read_reg(state, STB0899_BCLC);
STB0899_SETFIELD_VAL(BETA, bclc, 0x1c);
stb0899_write_reg(state, STB0899_BCLC, bclc);
clnI = 0;
} else if (params->srate <= 15000000) {
stb0899_write_reg(state, STB0899_ACLC, 0xc9);
bclc = stb0899_read_reg(state, STB0899_BCLC);
STB0899_SETFIELD_VAL(BETA, bclc, 0x22);
stb0899_write_reg(state, STB0899_BCLC, bclc);
clnI = 1;
} else if (params->srate <= 25000000) {
stb0899_write_reg(state, STB0899_ACLC, 0x89);
bclc = stb0899_read_reg(state, STB0899_BCLC);
STB0899_SETFIELD_VAL(BETA, bclc, 0x27);
stb0899_write_reg(state, STB0899_BCLC, bclc);
clnI = 2;
} else {
stb0899_write_reg(state, STB0899_ACLC, 0xc8);
bclc = stb0899_read_reg(state, STB0899_BCLC);
STB0899_SETFIELD_VAL(BETA, bclc, 0x29);
stb0899_write_reg(state, STB0899_BCLC, bclc);
clnI = 3;
}
dprintk(state->verbose, FE_DEBUG, 1, "Set the timing loop to acquisition");
/* Set the timing loop to acquisition */
stb0899_write_reg(state, STB0899_RTC, 0x46);
stb0899_write_reg(state, STB0899_CFD, 0xee);
/* !! WARNING !!
* Do not read any status variables during acquisition;
* if any are needed, read them before the acquisition starts.
* Querying status while acquiring causes the
* acquisition to go bad and hence no locks.
*/
dprintk(state->verbose, FE_DEBUG, 1, "Derot Percent=%d Srate=%d mclk=%d",
internal->derot_percent, params->srate, internal->mclk);
/* Initial calculations */
internal->derot_step = internal->derot_percent * (params->srate / 1000L) / internal->mclk; /* DerotStep/1000 * Fsymbol */
internal->t_derot = stb0899_calc_derot_time(params->srate);
internal->t_data = 500;
dprintk(state->verbose, FE_DEBUG, 1, "RESET stream merger");
/* RESET Stream merger */
reg = stb0899_read_reg(state, STB0899_TSTRES);
STB0899_SETFIELD_VAL(FRESRS, reg, 1);
stb0899_write_reg(state, STB0899_TSTRES, reg);
/*
* Set KDIVIDER to an intermediate value between
* 1/2 and 7/8 for acquisition
*/
reg = stb0899_read_reg(state, STB0899_DEMAPVIT);
STB0899_SETFIELD_VAL(DEMAPVIT_KDIVIDER, reg, 60);
stb0899_write_reg(state, STB0899_DEMAPVIT, reg);
stb0899_write_reg(state, STB0899_EQON, 0x01); /* Equalizer OFF while acquiring */
stb0899_write_reg(state, STB0899_VITSYNC, 0x19);
stb0899_first_subrange(state);
do {
/* Initialisations */
cfr[0] = cfr[1] = 0;
stb0899_write_regs(state, STB0899_CFRM, cfr, 2); /* RESET derotator frequency */
stb0899_write_reg(state, STB0899_RTF, 0);
reg = stb0899_read_reg(state, STB0899_CFD);
STB0899_SETFIELD_VAL(CFD_ON, reg, 1);
stb0899_write_reg(state, STB0899_CFD, reg);
internal->derot_freq = 0;
internal->status = NOAGC1;
/* enable tuner I/O */
stb0899_i2c_gate_ctrl(&state->frontend, 1);
/* Move tuner to frequency */
dprintk(state->verbose, FE_DEBUG, 1, "Tuner set frequency");
if (state->config->tuner_set_frequency)
state->config->tuner_set_frequency(&state->frontend, internal->freq);
if (state->config->tuner_get_frequency)
state->config->tuner_get_frequency(&state->frontend, &internal->freq);
msleep(internal->t_agc1 + internal->t_agc2 + internal->t_derot); /* AGC1, AGC2 and timing loop */
dprintk(state->verbose, FE_DEBUG, 1, "current derot freq=%d", internal->derot_freq);
internal->status = AGC1OK;
/* There is signal in the band */
if (config->tuner_get_bandwidth)
config->tuner_get_bandwidth(&state->frontend, &bandwidth);
/* disable tuner I/O */
stb0899_i2c_gate_ctrl(&state->frontend, 0);
if (params->srate <= bandwidth / 2)
stb0899_search_tmg(state); /* For low rates (SCPC) */
else
stb0899_check_tmg(state); /* For high rates (MCPC) */
if (internal->status == TIMINGOK) {
dprintk(state->verbose, FE_DEBUG, 1,
"TIMING OK ! Derot freq=%d, mclk=%d",
internal->derot_freq, internal->mclk);
if (stb0899_search_carrier(state) == CARRIEROK) { /* Search for carrier */
dprintk(state->verbose, FE_DEBUG, 1,
"CARRIER OK ! Derot freq=%d, mclk=%d",
internal->derot_freq, internal->mclk);
if (stb0899_search_data(state) == DATAOK) { /* Check for data */
dprintk(state->verbose, FE_DEBUG, 1,
"DATA OK ! Derot freq=%d, mclk=%d",
internal->derot_freq, internal->mclk);
if (stb0899_check_range(state) == RANGEOK) {
dprintk(state->verbose, FE_DEBUG, 1,
"RANGE OK ! derot freq=%d, mclk=%d",
internal->derot_freq, internal->mclk);
internal->freq = params->freq + ((internal->derot_freq * internal->mclk) / 1000);
reg = stb0899_read_reg(state, STB0899_PLPARM);
internal->fecrate = STB0899_GETFIELD(VITCURPUN, reg);
dprintk(state->verbose, FE_DEBUG, 1,
"freq=%d, internal resultant freq=%d",
params->freq, internal->freq);
dprintk(state->verbose, FE_DEBUG, 1,
"internal puncture rate=%d",
internal->fecrate);
}
}
}
}
if (internal->status != RANGEOK)
next_sub_range(state);
} while (internal->sub_range && internal->status != RANGEOK);
/* Set the timing loop to tracking */
stb0899_write_reg(state, STB0899_RTC, 0x33);
stb0899_write_reg(state, STB0899_CFD, 0xf7);
/* if locked and range ok, set Kdiv */
if (internal->status == RANGEOK) {
dprintk(state->verbose, FE_DEBUG, 1, "Locked & Range OK !");
stb0899_write_reg(state, STB0899_EQON, 0x41); /* Equalizer OFF while acquiring */
stb0899_write_reg(state, STB0899_VITSYNC, 0x39); /* SN to b'11 for acquisition */
/*
* Carrier loop optimization versus
* symbol Rate/Puncture Rate for Tracking
*/
reg = stb0899_read_reg(state, STB0899_BCLC);
switch (internal->fecrate) {
case STB0899_FEC_1_2: /* 13 */
stb0899_write_reg(state, STB0899_DEMAPVIT, 0x1a);
STB0899_SETFIELD_VAL(BETA, reg, betaTab[0][clnI]);
stb0899_write_reg(state, STB0899_BCLC, reg);
break;
case STB0899_FEC_2_3: /* 18 */
stb0899_write_reg(state, STB0899_DEMAPVIT, 44);
STB0899_SETFIELD_VAL(BETA, reg, betaTab[1][clnI]);
stb0899_write_reg(state, STB0899_BCLC, reg);
break;
case STB0899_FEC_3_4: /* 21 */
stb0899_write_reg(state, STB0899_DEMAPVIT, 60);
STB0899_SETFIELD_VAL(BETA, reg, betaTab[2][clnI]);
stb0899_write_reg(state, STB0899_BCLC, reg);
break;
case STB0899_FEC_5_6: /* 24 */
stb0899_write_reg(state, STB0899_DEMAPVIT, 75);
STB0899_SETFIELD_VAL(BETA, reg, betaTab[3][clnI]);
stb0899_write_reg(state, STB0899_BCLC, reg);
break;
case STB0899_FEC_6_7: /* 25 */
stb0899_write_reg(state, STB0899_DEMAPVIT, 88);
stb0899_write_reg(state, STB0899_ACLC, 0x88);
stb0899_write_reg(state, STB0899_BCLC, 0x9a);
break;
case STB0899_FEC_7_8: /* 26 */
stb0899_write_reg(state, STB0899_DEMAPVIT, 94);
STB0899_SETFIELD_VAL(BETA, reg, betaTab[4][clnI]);
stb0899_write_reg(state, STB0899_BCLC, reg);
break;
default:
dprintk(state->verbose, FE_DEBUG, 1, "Unsupported Puncture Rate");
break;
}
/* release stream merger RESET */
reg = stb0899_read_reg(state, STB0899_TSTRES);
STB0899_SETFIELD_VAL(FRESRS, reg, 0);
stb0899_write_reg(state, STB0899_TSTRES, reg);
/* disable carrier detector */
reg = stb0899_read_reg(state, STB0899_CFD);
STB0899_SETFIELD_VAL(CFD_ON, reg, 0);
stb0899_write_reg(state, STB0899_CFD, reg);
stb0899_read_regs(state, STB0899_EQUAI1, eq_const, 10);
}
return internal->status;
}
/*
* stb0899_dvbs2_config_uwp
* Configure UWP state machine
*/
static void stb0899_dvbs2_config_uwp(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_config *config = state->config;
u32 uwp1, uwp2, uwp3, reg;
uwp1 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL1);
uwp2 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL2);
uwp3 = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_CNTRL3);
STB0899_SETFIELD_VAL(UWP_ESN0_AVE, uwp1, config->esno_ave);
STB0899_SETFIELD_VAL(UWP_ESN0_QUANT, uwp1, config->esno_quant);
STB0899_SETFIELD_VAL(UWP_TH_SOF, uwp1, config->uwp_threshold_sof);
STB0899_SETFIELD_VAL(FE_COARSE_TRK, uwp2, internal->av_frame_coarse);
STB0899_SETFIELD_VAL(FE_FINE_TRK, uwp2, internal->av_frame_fine);
STB0899_SETFIELD_VAL(UWP_MISS_TH, uwp2, config->miss_threshold);
STB0899_SETFIELD_VAL(UWP_TH_ACQ, uwp3, config->uwp_threshold_acq);
STB0899_SETFIELD_VAL(UWP_TH_TRACK, uwp3, config->uwp_threshold_track);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL1, STB0899_OFF0_UWP_CNTRL1, uwp1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL2, STB0899_OFF0_UWP_CNTRL2, uwp2);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_UWP_CNTRL3, STB0899_OFF0_UWP_CNTRL3, uwp3);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, SOF_SRCH_TO);
STB0899_SETFIELD_VAL(SOF_SEARCH_TIMEOUT, reg, config->sof_search_timeout);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_SOF_SRCH_TO, STB0899_OFF0_SOF_SRCH_TO, reg);
}
/*
* stb0899_dvbs2_config_csm_auto
* Set CSM to AUTO mode
*/
static void stb0899_dvbs2_config_csm_auto(struct stb0899_state *state)
{
u32 reg;
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
STB0899_SETFIELD_VAL(CSM_AUTO_PARAM, reg, 1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, reg);
}
static long Log2Int(int number)
{
int i;
i = 0;
while ((1 << i) <= abs(number))
i++;
if (number == 0)
i = 1;
return i - 1;
}
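/*
 * Log2Int() returns floor(log2(|number|)), e.g. Log2Int(1) = 0,
 * Log2Int(5) = 2, Log2Int(8) = 3; by construction Log2Int(0) also
 * returns 0 rather than an error.
 */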
/*
* stb0899_dvbs2_calc_srate
* compute BTR_NOM_FREQ for the symbol rate
*/
static u32 stb0899_dvbs2_calc_srate(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_config *config = state->config;
u32 dec_ratio, dec_rate, decim, remain, intval, btr_nom_freq;
u32 master_clk, srate;
dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
dec_rate = Log2Int(dec_ratio);
decim = 1 << dec_rate;
master_clk = internal->master_clk / 1000;
srate = internal->srate / 1000;
if (decim <= 4) {
intval = (decim * (1 << (config->btr_nco_bits - 1))) / master_clk;
remain = (decim * (1 << (config->btr_nco_bits - 1))) % master_clk;
} else {
intval = (1 << (config->btr_nco_bits - 1)) / (master_clk / 100) * decim / 100;
remain = (decim * (1 << (config->btr_nco_bits - 1))) % master_clk;
}
btr_nom_freq = (intval * srate) + ((remain * srate) / master_clk);
return btr_nom_freq;
}
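/*
 * Worked example (illustrative, assuming config->btr_nco_bits = 28):
 * master_clk = 99 MHz and srate = 27.5 Mbaud give dec_ratio = 1, decim = 1,
 * so intval = (1 << 27) / 99000 = 1355, remain = 72728 and
 * btr_nom_freq = 1355 * 27500 + (72728 * 27500) / 99000 ~= 37282702,
 * i.e. roughly (27.5 / 99) * 2^27 as expected.
 */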
/*
* stb0899_dvbs2_calc_dev
* compute the correction to be applied to symbol rate
*/
static u32 stb0899_dvbs2_calc_dev(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
u32 dec_ratio, correction, master_clk, srate;
dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
master_clk = internal->master_clk / 1000; /* for integer calculation */
srate = internal->srate / 1000; /* for integer calculation */
correction = (512 * master_clk) / (2 * dec_ratio * srate);
return correction;
}
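/*
 * Example (illustrative): with master_clk = 99 MHz, srate = 27.5 Mbaud and
 * dec_ratio = 1, the correction is (512 * 99000) / (2 * 1 * 27500) = 921.
 */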
/*
* stb0899_dvbs2_set_srate
* Set DVBS2 symbol rate
*/
static void stb0899_dvbs2_set_srate(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
u32 dec_ratio, dec_rate, win_sel, decim, f_sym, btr_nom_freq;
u32 correction, freq_adj, band_lim, decim_cntrl, reg;
u8 anti_alias;
/*set decimation to 1*/
dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
dec_rate = Log2Int(dec_ratio);
win_sel = 0;
if (dec_rate >= 5)
win_sel = dec_rate - 4;
decim = (1 << dec_rate);
/* (FSamp/Fsymbol * 100) for integer calculation */
f_sym = internal->master_clk / ((decim * internal->srate) / 1000);
if (f_sym <= 2250) /* don't band limit signal going into btr block*/
band_lim = 1;
else
band_lim = 0; /* band limit signal going into btr block*/
decim_cntrl = ((win_sel << 3) & 0x18) + ((band_lim << 5) & 0x20) + (dec_rate & 0x7);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DECIM_CNTRL, STB0899_OFF0_DECIM_CNTRL, decim_cntrl);
if (f_sym <= 3450)
anti_alias = 0;
else if (f_sym <= 4250)
anti_alias = 1;
else
anti_alias = 2;
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ANTI_ALIAS_SEL, STB0899_OFF0_ANTI_ALIAS_SEL, anti_alias);
btr_nom_freq = stb0899_dvbs2_calc_srate(state);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_NOM_FREQ, STB0899_OFF0_BTR_NOM_FREQ, btr_nom_freq);
correction = stb0899_dvbs2_calc_dev(state);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_CNTRL);
STB0899_SETFIELD_VAL(BTR_FREQ_CORR, reg, correction);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_CNTRL, STB0899_OFF0_BTR_CNTRL, reg);
/* scale UWP+CSM frequency to sample rate*/
freq_adj = internal->srate / (internal->master_clk / 4096);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_FREQ_ADJ_SCALE, STB0899_OFF0_FREQ_ADJ_SCALE, freq_adj);
}
/*
* stb0899_dvbs2_set_btr_loopbw
* set bit timing loop bandwidth as a percentage of the symbol rate
*/
static void stb0899_dvbs2_set_btr_loopbw(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_config *config = state->config;
u32 sym_peak = 23, zeta = 707, loopbw_percent = 60;
s32 dec_ratio, dec_rate, k_btr1_rshft, k_btr1, k_btr0_rshft;
s32 k_btr0, k_btr2_rshft, k_direct_shift, k_indirect_shift;
u32 decim, K, wn, k_direct, k_indirect;
u32 reg;
dec_ratio = (internal->master_clk * 2) / (5 * internal->srate);
dec_ratio = (dec_ratio == 0) ? 1 : dec_ratio;
dec_rate = Log2Int(dec_ratio);
decim = (1 << dec_rate);
sym_peak *= 576000;
K = (1 << config->btr_nco_bits) / (internal->master_clk / 1000);
K *= (internal->srate / 1000000) * decim; /*k=k 10^-8*/
if (K != 0) {
K = sym_peak / K;
wn = (4 * zeta * zeta) + 1000000;
wn = (2 * (loopbw_percent * 1000) * 40 * zeta) /wn; /*wn =wn 10^-8*/
k_indirect = (wn * wn) / K;
/* k_indirect is in units of 10^-6 */
k_direct = (2 * wn * zeta) / K; /*kDirect = kDirect 10^-2*/
k_direct *= 100;
k_direct_shift = Log2Int(k_direct) - Log2Int(10000) - 2;
k_btr1_rshft = (-1 * k_direct_shift) + config->btr_gain_shift_offset;
k_btr1 = k_direct / (1 << k_direct_shift);
k_btr1 /= 10000;
k_indirect_shift = Log2Int(k_indirect + 15) - 20 /*- 2*/;
k_btr0_rshft = (-1 * k_indirect_shift) + config->btr_gain_shift_offset;
k_btr0 = k_indirect * (1 << (-k_indirect_shift));
k_btr0 /= 1000000;
k_btr2_rshft = 0;
if (k_btr0_rshft > 15) {
k_btr2_rshft = k_btr0_rshft - 15;
k_btr0_rshft = 15;
}
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_LOOP_GAIN);
STB0899_SETFIELD_VAL(KBTR0_RSHFT, reg, k_btr0_rshft);
STB0899_SETFIELD_VAL(KBTR0, reg, k_btr0);
STB0899_SETFIELD_VAL(KBTR1_RSHFT, reg, k_btr1_rshft);
STB0899_SETFIELD_VAL(KBTR1, reg, k_btr1);
STB0899_SETFIELD_VAL(KBTR2_RSHFT, reg, k_btr2_rshft);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_LOOP_GAIN, STB0899_OFF0_BTR_LOOP_GAIN, reg);
} else
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_LOOP_GAIN, STB0899_OFF0_BTR_LOOP_GAIN, 0xc4c4f);
}
/*
* stb0899_dvbs2_set_carr_freq
* set nominal frequency for carrier search
*/
static void stb0899_dvbs2_set_carr_freq(struct stb0899_state *state, s32 carr_freq, u32 master_clk)
{
struct stb0899_config *config = state->config;
s32 crl_nom_freq;
u32 reg;
crl_nom_freq = (1 << config->crl_nco_bits) / master_clk;
crl_nom_freq *= carr_freq;
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, crl_nom_freq);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
}
/*
* stb0899_dvbs2_init_calc
* Initialize DVBS2 UWP, CSM, carrier and timing loops
*/
static void stb0899_dvbs2_init_calc(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
s32 steps, step_size;
u32 range, reg;
/* config uwp and csm */
stb0899_dvbs2_config_uwp(state);
stb0899_dvbs2_config_csm_auto(state);
/* initialize BTR */
stb0899_dvbs2_set_srate(state);
stb0899_dvbs2_set_btr_loopbw(state);
if (internal->srate / 1000000 >= 15)
step_size = (1 << 17) / 5;
else if (internal->srate / 1000000 >= 10)
step_size = (1 << 17) / 7;
else if (internal->srate / 1000000 >= 5)
step_size = (1 << 17) / 10;
else
step_size = (1 << 17) / 4;
range = internal->srch_range / 1000000;
steps = (10 * range * (1 << 17)) / (step_size * (internal->srate / 1000000));
steps = (steps + 6) / 10;
steps = (steps == 0) ? 1 : steps;
if (steps % 2 == 0)
stb0899_dvbs2_set_carr_freq(state, internal->center_freq -
(internal->step_size * (internal->srate / 20000000)),
(internal->master_clk) / 1000000);
else
stb0899_dvbs2_set_carr_freq(state, internal->center_freq, (internal->master_clk) / 1000000);
/* Set carrier search params (zigzag, num steps and freq step size) */
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, ACQ_CNTRL2);
STB0899_SETFIELD_VAL(ZIGZAG, reg, 1);
STB0899_SETFIELD_VAL(NUM_STEPS, reg, steps);
STB0899_SETFIELD_VAL(FREQ_STEPSIZE, reg, step_size);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ACQ_CNTRL2, STB0899_OFF0_ACQ_CNTRL2, reg);
}
/*
* stb0899_dvbs2_btr_init
* initialize the timing loop
*/
static void stb0899_dvbs2_btr_init(struct stb0899_state *state)
{
u32 reg;
/* set enable BTR loopback */
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_CNTRL);
STB0899_SETFIELD_VAL(INTRP_PHS_SENSE, reg, 1);
STB0899_SETFIELD_VAL(BTR_ERR_ENA, reg, 1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_CNTRL, STB0899_OFF0_BTR_CNTRL, reg);
/* fix btr freq accum at 0 */
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_FREQ_INIT, STB0899_OFF0_BTR_FREQ_INIT, 0x10000000);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_FREQ_INIT, STB0899_OFF0_BTR_FREQ_INIT, 0x00000000);
/* fix btr freq accum at 0 */
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_PHS_INIT, STB0899_OFF0_BTR_PHS_INIT, 0x10000000);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_BTR_PHS_INIT, STB0899_OFF0_BTR_PHS_INIT, 0x00000000);
}
/*
* stb0899_dvbs2_reacquire
* trigger a DVB-S2 acquisition
*/
static void stb0899_dvbs2_reacquire(struct stb0899_state *state)
{
u32 reg = 0;
/* demod soft reset */
STB0899_SETFIELD_VAL(DVBS2_RESET, reg, 1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_RESET_CNTRL, STB0899_OFF0_RESET_CNTRL, reg);
/*Reset Timing Loop */
stb0899_dvbs2_btr_init(state);
/* reset Carrier loop */
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_FREQ_INIT, STB0899_OFF0_CRL_FREQ_INIT, (1 << 30));
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_FREQ_INIT, STB0899_OFF0_CRL_FREQ_INIT, 0);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_LOOP_GAIN, STB0899_OFF0_CRL_LOOP_GAIN, 0);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_PHS_INIT, STB0899_OFF0_CRL_PHS_INIT, (1 << 30));
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_PHS_INIT, STB0899_OFF0_CRL_PHS_INIT, 0);
/*release demod soft reset */
reg = 0;
STB0899_SETFIELD_VAL(DVBS2_RESET, reg, 0);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_RESET_CNTRL, STB0899_OFF0_RESET_CNTRL, reg);
/* start acquisition process */
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_ACQUIRE_TRIG, STB0899_OFF0_ACQUIRE_TRIG, 1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_LOCK_LOST, STB0899_OFF0_LOCK_LOST, 0);
/* equalizer Init */
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQUALIZER_INIT, STB0899_OFF0_EQUALIZER_INIT, 1);
/* Start equalizer */
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQUALIZER_INIT, STB0899_OFF0_EQUALIZER_INIT, 0);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
STB0899_SETFIELD_VAL(EQ_SHIFT, reg, 0);
STB0899_SETFIELD_VAL(EQ_DISABLE_UPDATE, reg, 0);
STB0899_SETFIELD_VAL(EQ_DELAY, reg, 0x05);
STB0899_SETFIELD_VAL(EQ_ADAPT_MODE, reg, 0x01);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
/* RESET Packet delineator */
stb0899_write_reg(state, STB0899_PDELCTRL, 0x4a);
}
/*
* stb0899_dvbs2_get_dmd_status
* get DVB-S2 Demod LOCK status
*/
static enum stb0899_status stb0899_dvbs2_get_dmd_status(struct stb0899_state *state, int timeout)
{
int time = -10, lock = 0, uwp, csm;
u32 reg;
do {
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STATUS);
dprintk(state->verbose, FE_DEBUG, 1, "DMD_STATUS=[0x%02x]", reg);
if (STB0899_GETFIELD(IF_AGC_LOCK, reg))
dprintk(state->verbose, FE_DEBUG, 1, "------------->IF AGC LOCKED !");
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_STAT2);
dprintk(state->verbose, FE_DEBUG, 1, "----------->DMD STAT2=[0x%02x]", reg);
uwp = STB0899_GETFIELD(UWP_LOCK, reg);
csm = STB0899_GETFIELD(CSM_LOCK, reg);
if (uwp && csm)
lock = 1;
time += 10;
msleep(10);
} while ((!lock) && (time <= timeout));
if (lock) {
dprintk(state->verbose, FE_DEBUG, 1, "----------------> DVB-S2 LOCK !");
return DVBS2_DEMOD_LOCK;
} else {
return DVBS2_DEMOD_NOLOCK;
}
}
/*
* stb0899_dvbs2_get_data_lock
* get FEC status
*/
static int stb0899_dvbs2_get_data_lock(struct stb0899_state *state, int timeout)
{
int time = 0, lock = 0;
u8 reg;
while ((!lock) && (time < timeout)) {
reg = stb0899_read_reg(state, STB0899_CFGPDELSTATUS1);
dprintk(state->verbose, FE_DEBUG, 1, "---------> CFGPDELSTATUS=[0x%02x]", reg);
lock = STB0899_GETFIELD(CFGPDELSTATUS_LOCK, reg);
time++;
}
return lock;
}
/*
* stb0899_dvbs2_get_fec_status
* get DVB-S2 FEC LOCK status
*/
static enum stb0899_status stb0899_dvbs2_get_fec_status(struct stb0899_state *state, int timeout)
{
int time = 0, Locked;
do {
Locked = stb0899_dvbs2_get_data_lock(state, 1);
time++;
msleep(1);
} while ((!Locked) && (time < timeout));
if (Locked) {
dprintk(state->verbose, FE_DEBUG, 1, "---------->DVB-S2 FEC LOCK !");
return DVBS2_FEC_LOCK;
} else {
return DVBS2_FEC_NOLOCK;
}
}
/*
* stb0899_dvbs2_init_csm
* set parameters for manual mode
*/
static void stb0899_dvbs2_init_csm(struct stb0899_state *state, int pilots, enum stb0899_modcod modcod)
{
struct stb0899_internal *internal = &state->internal;
s32 dvt_tbl = 1, two_pass = 0, agc_gain = 6, agc_shift = 0, loop_shift = 0, phs_diff_thr = 0x80;
s32 gamma_acq, gamma_rho_acq, gamma_trk, gamma_rho_trk, lock_count_thr;
u32 csm1, csm2, csm3, csm4;
if (((internal->master_clk / internal->srate) <= 4) && (modcod <= 11) && (pilots == 1)) {
switch (modcod) {
case STB0899_QPSK_12:
gamma_acq = 25;
gamma_rho_acq = 2700;
gamma_trk = 12;
gamma_rho_trk = 180;
lock_count_thr = 8;
break;
case STB0899_QPSK_35:
gamma_acq = 38;
gamma_rho_acq = 7182;
gamma_trk = 14;
gamma_rho_trk = 308;
lock_count_thr = 8;
break;
case STB0899_QPSK_23:
gamma_acq = 42;
gamma_rho_acq = 9408;
gamma_trk = 17;
gamma_rho_trk = 476;
lock_count_thr = 8;
break;
case STB0899_QPSK_34:
gamma_acq = 53;
gamma_rho_acq = 16642;
gamma_trk = 19;
gamma_rho_trk = 646;
lock_count_thr = 8;
break;
case STB0899_QPSK_45:
gamma_acq = 53;
gamma_rho_acq = 17119;
gamma_trk = 22;
gamma_rho_trk = 880;
lock_count_thr = 8;
break;
case STB0899_QPSK_56:
gamma_acq = 55;
gamma_rho_acq = 19250;
gamma_trk = 23;
gamma_rho_trk = 989;
lock_count_thr = 8;
break;
case STB0899_QPSK_89:
gamma_acq = 60;
gamma_rho_acq = 24240;
gamma_trk = 24;
gamma_rho_trk = 1176;
lock_count_thr = 8;
break;
case STB0899_QPSK_910:
gamma_acq = 66;
gamma_rho_acq = 29634;
gamma_trk = 24;
gamma_rho_trk = 1176;
lock_count_thr = 8;
break;
default:
gamma_acq = 66;
gamma_rho_acq = 29634;
gamma_trk = 24;
gamma_rho_trk = 1176;
lock_count_thr = 8;
break;
}
csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
STB0899_SETFIELD_VAL(CSM_AUTO_PARAM, csm1, 0);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
csm2 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL2);
csm3 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL3);
csm4 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL4);
STB0899_SETFIELD_VAL(CSM_DVT_TABLE, csm1, dvt_tbl);
STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, two_pass);
STB0899_SETFIELD_VAL(CSM_AGC_GAIN, csm1, agc_gain);
STB0899_SETFIELD_VAL(CSM_AGC_SHIFT, csm1, agc_shift);
STB0899_SETFIELD_VAL(FE_LOOP_SHIFT, csm1, loop_shift);
STB0899_SETFIELD_VAL(CSM_GAMMA_ACQ, csm2, gamma_acq);
STB0899_SETFIELD_VAL(CSM_GAMMA_RHOACQ, csm2, gamma_rho_acq);
STB0899_SETFIELD_VAL(CSM_GAMMA_TRACK, csm3, gamma_trk);
STB0899_SETFIELD_VAL(CSM_GAMMA_RHOTRACK, csm3, gamma_rho_trk);
STB0899_SETFIELD_VAL(CSM_LOCKCOUNT_THRESH, csm4, lock_count_thr);
STB0899_SETFIELD_VAL(CSM_PHASEDIFF_THRESH, csm4, phs_diff_thr);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL2, STB0899_OFF0_CSM_CNTRL2, csm2);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL3, STB0899_OFF0_CSM_CNTRL3, csm3);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL4, STB0899_OFF0_CSM_CNTRL4, csm4);
}
}
/*
* stb0899_dvbs2_get_srate
* get DVB-S2 Symbol Rate
*/
static u32 stb0899_dvbs2_get_srate(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
struct stb0899_config *config = state->config;
u32 bTrNomFreq, srate, decimRate, intval1, intval2, reg;
int div1, div2, rem1, rem2;
div1 = config->btr_nco_bits / 2;
div2 = config->btr_nco_bits - div1 - 1;
bTrNomFreq = STB0899_READ_S2REG(STB0899_S2DEMOD, BTR_NOM_FREQ);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DECIM_CNTRL);
decimRate = STB0899_GETFIELD(DECIM_RATE, reg);
decimRate = (1 << decimRate);
intval1 = internal->master_clk / (1 << div1);
intval2 = bTrNomFreq / (1 << div2);
rem1 = internal->master_clk % (1 << div1);
rem2 = bTrNomFreq % (1 << div2);
/* only for integer calculation */
srate = (intval1 * intval2) + ((intval1 * rem2) / (1 << div2)) + ((intval2 * rem1) / (1 << div1));
srate /= decimRate; /*symbrate = (btrnomfreq_register_val*MasterClock)/2^(27+decim_rate_field) */
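/*
 * The split into integer and remainder parts above evaluates
 *	srate = master_clk * btr_nom_freq / 2^(div1 + div2)
 * without a 64-bit multiply: (a1 + r1/2^div1) * (a2 + r2/2^div2) is
 * expanded and the negligible r1*r2 cross term dropped. With
 * btr_nco_bits = 28 (the usual configuration, an assumption here),
 * div1 + div2 = 27, matching the 2^(27 + decim) in the comment above.
 */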
return srate;
}
/*
* stb0899_dvbs2_algo
* Search for signal, timing, carrier and data for a given
* frequency in a given range
*/
enum stb0899_status stb0899_dvbs2_algo(struct stb0899_state *state)
{
struct stb0899_internal *internal = &state->internal;
enum stb0899_modcod modcod;
s32 offsetfreq, searchTime, FecLockTime, pilots, iqSpectrum;
int i = 0;
u32 reg, csm1;
if (internal->srate <= 2000000) {
searchTime = 5000; /* 5000 ms max time to lock UWP and CSM, SYMB <= 2Mbs */
FecLockTime = 350; /* 350 ms max time to lock FEC, SYMB <= 2Mbs */
} else if (internal->srate <= 5000000) {
searchTime = 2500; /* 2500 ms max time to lock UWP and CSM, 2Mbs < SYMB <= 5Mbs */
FecLockTime = 170; /* 170 ms max time to lock FEC, 2Mbs< SYMB <= 5Mbs */
} else if (internal->srate <= 10000000) {
searchTime = 1500; /* 1500 ms max time to lock UWP and CSM, 5Mbs <SYMB <= 10Mbs */
FecLockTime = 80; /* 80 ms max time to lock FEC, 5Mbs< SYMB <= 10Mbs */
} else if (internal->srate <= 15000000) {
searchTime = 500; /* 500 ms max time to lock UWP and CSM, 10Mbs <SYMB <= 15Mbs */
FecLockTime = 50; /* 50 ms max time to lock FEC, 10Mbs< SYMB <= 15Mbs */
} else if (internal->srate <= 20000000) {
searchTime = 300; /* 300 ms max time to lock UWP and CSM, 15Mbs < SYMB <= 20Mbs */
FecLockTime = 30; /* 30 ms max time to lock FEC, 15Mbs < SYMB <= 20Mbs */
} else if (internal->srate <= 25000000) {
searchTime = 250; /* 250 ms max time to lock UWP and CSM, 20 Mbs < SYMB <= 25Mbs */
FecLockTime = 25; /* 25 ms max time to lock FEC, 20Mbs< SYMB <= 25Mbs */
} else {
searchTime = 150; /* 150 ms max time to lock UWP and CSM, SYMB > 25Mbs */
FecLockTime = 20; /* 20 ms max time to lock FEC, SYMB > 25Mbs */
}
/* Maintain Stream Merger in reset during acquisition */
reg = stb0899_read_reg(state, STB0899_TSTRES);
STB0899_SETFIELD_VAL(FRESRS, reg, 1);
stb0899_write_reg(state, STB0899_TSTRES, reg);
/* enable tuner I/O */
stb0899_i2c_gate_ctrl(&state->frontend, 1);
/* Move tuner to frequency */
if (state->config->tuner_set_frequency)
state->config->tuner_set_frequency(&state->frontend, internal->freq);
if (state->config->tuner_get_frequency)
state->config->tuner_get_frequency(&state->frontend, &internal->freq);
/* disable tuner I/O */
stb0899_i2c_gate_ctrl(&state->frontend, 0);
/* Set IF AGC to acquisition */
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
STB0899_SETFIELD_VAL(IF_LOOP_GAIN, reg, 4);
STB0899_SETFIELD_VAL(IF_AGC_REF, reg, 32);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL2);
STB0899_SETFIELD_VAL(IF_AGC_DUMP_PER, reg, 0);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL2, STB0899_OFF0_IF_AGC_CNTRL2, reg);
/* Initialisation */
stb0899_dvbs2_init_calc(state);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
switch (internal->inversion) {
case IQ_SWAP_OFF:
STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 0);
break;
case IQ_SWAP_ON:
STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 1);
break;
case IQ_SWAP_AUTO: /* use last successful search first */
STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, 1);
break;
}
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DMD_CNTRL2, STB0899_OFF0_DMD_CNTRL2, reg);
stb0899_dvbs2_reacquire(state);
/* Wait for demod lock (UWP and CSM) */
internal->status = stb0899_dvbs2_get_dmd_status(state, searchTime);
if (internal->status == DVBS2_DEMOD_LOCK) {
dprintk(state->verbose, FE_DEBUG, 1, "------------> DVB-S2 DEMOD LOCK !");
i = 0;
/* Demod Locked, check FEC status */
internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
/* If false lock (UWP and CSM locked but no FEC), try 3 times max */
while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
/* Read the frequency offset*/
offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
/* Set the Nominal frequency to the found frequency offset for the next reacquire*/
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, offsetfreq);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
stb0899_dvbs2_reacquire(state);
internal->status = stb0899_dvbs2_get_fec_status(state, searchTime);
i++;
}
}
if (internal->status != DVBS2_FEC_LOCK) {
if (internal->inversion == IQ_SWAP_AUTO) {
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
iqSpectrum = STB0899_GETFIELD(SPECTRUM_INVERT, reg);
/* IQ Spectrum Inversion */
STB0899_SETFIELD_VAL(SPECTRUM_INVERT, reg, !iqSpectrum);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_DMD_CNTRL2, STB0899_OFF0_DMD_CNTRL2, reg);
/* start acquisition process */
stb0899_dvbs2_reacquire(state);
/* Wait for demod lock (UWP and CSM) */
internal->status = stb0899_dvbs2_get_dmd_status(state, searchTime);
if (internal->status == DVBS2_DEMOD_LOCK) {
i = 0;
/* Demod Locked, check FEC */
internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
/* try 3 times max for false locks (UWP and CSM locked but no FEC) */
while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
/* Read the frequency offset*/
offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
/* Set the Nominal frequency to the found frequency offset for the next reacquire*/
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_NOM_FREQ);
STB0899_SETFIELD_VAL(CRL_NOM_FREQ, reg, offsetfreq);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CRL_NOM_FREQ, STB0899_OFF0_CRL_NOM_FREQ, reg);
stb0899_dvbs2_reacquire(state);
internal->status = stb0899_dvbs2_get_fec_status(state, searchTime);
i++;
}
}
/*
if (pParams->DVBS2State == FE_DVBS2_FEC_LOCKED)
pParams->IQLocked = !iqSpectrum;
*/
}
}
if (internal->status == DVBS2_FEC_LOCK) {
dprintk(state->verbose, FE_DEBUG, 1, "----------------> DVB-S2 FEC Lock !");
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
modcod = STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 2;
pilots = STB0899_GETFIELD(UWP_DECODE_MOD, reg) & 0x01;
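/*
 * stb0899_dvbs2_init_csm() below switches the CSM to manual parameters
 * only when master_clk/srate is at most about 4.1 (the comparison is kept
 * in integer math), pilots are on and the modcod is QPSK 2/3 .. 9/10.
 */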
if ((((10 * internal->master_clk) / (internal->srate / 10)) <= 410) &&
(INRANGE(STB0899_QPSK_23, modcod, STB0899_QPSK_910)) &&
(pilots == 1)) {
stb0899_dvbs2_init_csm(state, pilots, modcod);
/* Wait for UWP, CSM and data LOCK, 20 ms max */
internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
i = 0;
while ((internal->status != DVBS2_FEC_LOCK) && (i < 3)) {
csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, 1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
csm1 = STB0899_READ_S2REG(STB0899_S2DEMOD, CSM_CNTRL1);
STB0899_SETFIELD_VAL(CSM_TWO_PASS, csm1, 0);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, csm1);
internal->status = stb0899_dvbs2_get_fec_status(state, FecLockTime);
i++;
}
}
if ((((10 * internal->master_clk) / (internal->srate / 10)) <= 410) &&
(INRANGE(STB0899_QPSK_12, modcod, STB0899_QPSK_35)) &&
(pilots == 1)) {
/* Equalizer Disable update */
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
STB0899_SETFIELD_VAL(EQ_DISABLE_UPDATE, reg, 1);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
}
/* slow down the Equalizer once locked */
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, EQ_CNTRL);
STB0899_SETFIELD_VAL(EQ_SHIFT, reg, 0x02);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_EQ_CNTRL, STB0899_OFF0_EQ_CNTRL, reg);
/* Store signal parameters */
offsetfreq = STB0899_READ_S2REG(STB0899_S2DEMOD, CRL_FREQ);
offsetfreq = offsetfreq / ((1 << 30) / 1000);
offsetfreq *= (internal->master_clk / 1000000);
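/*
 * The two statements above turn the CRL frequency word into a carrier
 * offset: offset (Hz) ~= CRL_FREQ * master_clk / 2^30, stored in kHz here
 * on the assumption that internal->freq is kept in kHz; dividing by
 * (1 << 30) / 1000 first keeps the arithmetic within 32 bits.
 */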
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, DMD_CNTRL2);
if (STB0899_GETFIELD(SPECTRUM_INVERT, reg))
offsetfreq *= -1;
internal->freq = internal->freq - offsetfreq;
internal->srate = stb0899_dvbs2_get_srate(state);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, UWP_STAT2);
internal->modcod = STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 2;
internal->pilots = STB0899_GETFIELD(UWP_DECODE_MOD, reg) & 0x01;
internal->frame_length = (STB0899_GETFIELD(UWP_DECODE_MOD, reg) >> 1) & 0x01;
/* Set IF AGC to tracking */
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL);
STB0899_SETFIELD_VAL(IF_LOOP_GAIN, reg, 3);
/* if QPSK 1/2,QPSK 3/5 or QPSK 2/3 set IF AGC reference to 16 otherwise 32*/
if (INRANGE(STB0899_QPSK_12, internal->modcod, STB0899_QPSK_23))
STB0899_SETFIELD_VAL(IF_AGC_REF, reg, 16);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL, STB0899_OFF0_IF_AGC_CNTRL, reg);
reg = STB0899_READ_S2REG(STB0899_S2DEMOD, IF_AGC_CNTRL2);
STB0899_SETFIELD_VAL(IF_AGC_DUMP_PER, reg, 7);
stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_IF_AGC_CNTRL2, STB0899_OFF0_IF_AGC_CNTRL2, reg);
}
/* Release Stream Merger Reset */
reg = stb0899_read_reg(state, STB0899_TSTRES);
STB0899_SETFIELD_VAL(FRESRS, reg, 0);
stb0899_write_reg(state, STB0899_TSTRES, reg);
return internal->status;
}
| gpl-2.0 |
AOSP-Jaguar/android_kernel_sony_msm8974 | drivers/gpu/drm/nouveau/nouveau_ioc32.c | 9289 | 2223 | /**
* \file nouveau_ioc32.c
*
* 32-bit ioctl compatibility routines for the Nouveau DRM.
*
* \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
*
*
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Egbert Eich 2003,2004
* Copyright (C) Dave Airlie 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
* \param filp file pointer.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*/
long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
#if 0
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
ret = drm_ioctl(filp, cmd, arg);
return ret;
}
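/*
 * With the per-driver table compiled out above, fn stays NULL for every
 * driver-specific ioctl, so nouveau currently routes all of them through
 * the regular drm_ioctl() path; only the generic DRM ioctls below
 * DRM_COMMAND_BASE get 32-bit translation via drm_compat_ioctl().
 */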
| gpl-2.0 |
mifl/android_kernel_pantech_im-860s | drivers/net/ethernet/chelsio/cxgb/pm3393.c | 10569 | 30323 | /*****************************************************************************
* *
* File: pm3393.c *
* $Revision: 1.16 $ *
* $Date: 2005/05/14 00:59:32 $ *
* Description: *
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "common.h"
#include "regs.h"
#include "gmac.h"
#include "elmer0.h"
#include "suni1x10gexp_regs.h"
#include <linux/crc32.h>
#include <linux/slab.h>
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
#define MAX_FRAME_SIZE 9600
#define IPG 12
#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
SUNI1x10GEXP_BITMSK_TXXG_PADEN)
#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
/* Update statistics every 15 minutes */
#define STATS_TICK_SECS (15 * 60)
enum { /* RMON registers */
RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
};
struct _cmac_instance {
u8 enabled;
u8 fc;
u8 mac_addr[6];
};
static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
{
t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
return 0;
}
static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
{
t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
return 0;
}
/* Port reset. */
static int pm3393_reset(struct cmac *cmac)
{
return 0;
}
/*
* Enable interrupts for the PM3393
*
* 1. Enable PM3393 BLOCK interrupts.
* 2. Enable PM3393 Master Interrupt bit(INTE)
* 3. Enable ELMER's PM3393 bit.
* 4. Enable Terminator external interrupt.
*/
static int pm3393_interrupt_enable(struct cmac *cmac)
{
u32 pl_intr;
/* PM3393 - Enabling all hardware block interrupts.
*/
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
/* Don't interrupt on statistics overflow, we are polling */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
/* PM3393 - Global interrupt enable
*/
/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
/* TERMINATOR - PL_INTERUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
return 0;
}
static int pm3393_interrupt_disable(struct cmac *cmac)
{
u32 elmer;
/* PM3393 - Disabling HW interrupt blocks. */
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
/* PM3393 - Global interrupt enable */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
/* ELMER - External chip interrupts. */
t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
/* TERMINATOR - PL_INTERUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
return 0;
}
static int pm3393_interrupt_clear(struct cmac *cmac)
{
u32 elmer;
u32 pl_intr;
u32 val32;
/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
* bit WCIMODE=0 for a clear-on-read.
*/
pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
&val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
/* PM3393 - Global interrupt status
*/
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
/* ELMER - External chip interrupts.
*/
t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
/* TERMINATOR - PL_INTERUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
return 0;
}
/* Interrupt handler */
static int pm3393_interrupt_handler(struct cmac *cmac)
{
u32 master_intr_status;
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
if (netif_msg_intr(cmac->adapter))
dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
master_intr_status);
/* TBD XXX Let's just clear everything for now */
pm3393_interrupt_clear(cmac);
return 0;
}
static int pm3393_enable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
if (which & MAC_DIRECTION_TX) {
u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
if (cmac->instance->fc & PAUSE_RX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
if (cmac->instance->fc & PAUSE_TX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
}
cmac->instance->enabled |= which;
return 0;
}
static int pm3393_enable_port(struct cmac *cmac, int which)
{
/* Clear port statistics */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
udelay(2);
memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
pm3393_enable(cmac, which);
/*
* XXX This should be done by the PHY and preferably not at all.
* The PHY doesn't give us link status indication on its own so have
* the link management code query it instead.
*/
t1_link_changed(cmac->adapter, 0);
return 0;
}
static int pm3393_disable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
if (which & MAC_DIRECTION_TX)
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
/*
* The disable is graceful. Give the PM3393 time. Can't wait very
* long here, we may be holding locks.
*/
udelay(20);
cmac->instance->enabled &= ~which;
return 0;
}
static int pm3393_loopback_enable(struct cmac *cmac)
{
return 0;
}
static int pm3393_loopback_disable(struct cmac *cmac)
{
return 0;
}
static int pm3393_set_mtu(struct cmac *cmac, int mtu)
{
int enabled = cmac->instance->enabled;
/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
mtu += 14 + 4;
if (mtu > MAX_FRAME_SIZE)
return -EINVAL;
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
{
int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
u32 rx_mode;
/* Disable MAC RX before reconfiguring it */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
(u16)rx_mode);
if (t1_rx_mode_promisc(rm)) {
/* Promiscuous mode. */
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
}
if (t1_rx_mode_allmulti(rm)) {
/* Accept all multicast. */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
struct netdev_hw_addr *ha;
int bit;
u16 mc_filter[4] = { 0, };
netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
/* bit[23:28] */
bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
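/* 64-bit hash split over four 16-bit words: word index = bit >> 4, bit in word = bit & 0xf */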
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
if (enabled)
pm3393_enable(cmac, MAC_DIRECTION_RX);
return 0;
}
static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
int *duplex, int *fc)
{
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
if (fc)
*fc = cmac->instance->fc;
return 0;
}
static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
int fc)
{
if (speed >= 0 && speed != SPEED_10000)
return -1;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -1;
if (fc & ~(PAUSE_TX | PAUSE_RX))
return -1;
if (fc != cmac->instance->fc) {
cmac->instance->fc = (u8) fc;
if (cmac->instance->enabled & MAC_DIRECTION_TX)
pm3393_enable(cmac, MAC_DIRECTION_TX);
}
return 0;
}
#define RMON_UPDATE(mac, name, stat_name) \
{ \
t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
(mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
((u64)(val1 & 0xffff) << 16) | \
((u64)(val2 & 0xff) << 32) | \
((mac)->stats.stat_name & \
0xffffff0000000000ULL); \
if (ro & \
(1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
(mac)->stats.stat_name += 1ULL << 40; \
}
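/*
 * Each MSTAT counter is 40 bits wide and, as the >> 2 indexing implies,
 * occupies four register addresses: the macro stitches the low/mid/high
 * reads into bits 39:0, keeps the software-extended bits above bit 39
 * from the previous value, and adds 2^40 whenever the corresponding
 * rollover bit was set in 'ro'.
 */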
static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
int flag)
{
u64 ro;
u32 val0, val1, val2, val3;
/* Snap the counters */
pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
/* Counter rollover, clear on read */
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
/* Rx stats */
RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
RxInternalMACRcvError);
RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
RMON_UPDATE(mac, RxFragments, RxRuntErrors);
RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
/* Tx stats */
RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
TxInternalMACXmitError);
RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
return &mac->stats;
}
static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
memcpy(mac_addr, cmac->instance->mac_addr, 6);
return 0;
}
static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
/*
* MAC addr: 00:07:43:00:13:09
*
* ma[5] = 0x09
* ma[4] = 0x13
* ma[3] = 0x00
* ma[2] = 0x43
* ma[1] = 0x07
* ma[0] = 0x00
*
* The PM3393 requires byte swapping and reverse order entry
* when programming MAC addresses:
*
* low_bits[15:0] = ma[1]:ma[0]
* mid_bits[31:16] = ma[3]:ma[2]
* high_bits[47:32] = ma[5]:ma[4]
*/
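/* e.g. for the 00:07:43:00:13:09 example above: lo = 0x0700, mid = 0x0043, hi = 0x0913 */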
/* Store local copy */
memcpy(cmac->instance->mac_addr, ma, 6);
lo = ((u32) ma[1] << 8) | (u32) ma[0];
mid = ((u32) ma[3] << 8) | (u32) ma[2];
hi = ((u32) ma[5] << 8) | (u32) ma[4];
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Set RXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
/* Set TXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
/* Setup Exact Match Filter 1 with our MAC address
*
* Must disable exact match filter before configuring it.
*/
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
val &= 0xff0f;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
val |= 0x0090;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
static void pm3393_destroy(struct cmac *cmac)
{
kfree(cmac);
}
static struct cmac_ops pm3393_ops = {
.destroy = pm3393_destroy,
.reset = pm3393_reset,
.interrupt_enable = pm3393_interrupt_enable,
.interrupt_disable = pm3393_interrupt_disable,
.interrupt_clear = pm3393_interrupt_clear,
.interrupt_handler = pm3393_interrupt_handler,
.enable = pm3393_enable_port,
.disable = pm3393_disable,
.loopback_enable = pm3393_loopback_enable,
.loopback_disable = pm3393_loopback_disable,
.set_mtu = pm3393_set_mtu,
.set_rx_mode = pm3393_set_rx_mode,
.get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
.set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
.statistics_update = pm3393_update_statistics,
.macaddress_get = pm3393_macaddress_get,
.macaddress_set = pm3393_macaddress_set
};
static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
{
struct cmac *cmac;
cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
if (!cmac)
return NULL;
cmac->ops = &pm3393_ops;
cmac->instance = (cmac_instance *) (cmac + 1);
cmac->adapter = adapter;
cmac->instance->fc = PAUSE_TX | PAUSE_RX;
t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
/* For T1 use timer based Mac flow control. */
t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
/* Setup Exact Match Filter 0 to allow broadcast packets.
*/
t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
return cmac;
}
static int pm3393_mac_reset(adapter_t * adapter)
{
u32 val;
u32 x;
u32 is_pl4_reset_finished;
u32 is_pl4_outof_lock;
u32 is_xaui_mabc_pll_locked;
u32 successful_reset;
int i;
/* The following steps are required to properly reset
* the PM3393. This information is provided in the
* PM3393 datasheet (Issue 2: November 2002)
* section 13.1 -- Device Reset.
*
* The PM3393 has three types of components that are
* individually reset:
*
* DRESETB - Digital circuitry
* PL4_ARESETB - PL4 analog circuitry
* XAUI_ARESETB - XAUI bus analog circuitry
*
* Steps to reset PM3393 using RSTB pin:
*
* 1. Assert RSTB pin low ( write 0 )
* 2. Wait at least 1ms to initiate a complete initialization of device.
* 3. Wait until all external clocks and REFSEL are stable.
* 4. Wait minimum of 1ms. (after external clocks and REFSEL are stable)
* 5. De-assert RSTB ( write 1 )
* 6. Wait until internal timers expire after ~14ms.
* - Allows analog clock synthesizer(PL4CSU) to stabilize to
* selected reference frequency before allowing the digital
* portion of the device to operate.
* 7. Wait at least 200us for XAUI interface to stabilize.
* 8. Verify the PM3393 came out of reset successfully.
* Set successful reset flag if everything worked else try again
* a few more times.
*/
successful_reset = 0;
for (i = 0; i < 3 && !successful_reset; i++) {
/* 1 */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 2 */
msleep(1);
/* 3 */
msleep(1);
/* 4 */
msleep(2 /*1 extra ms for safety */ );
/* 5 */
val |= 1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 6 */
msleep(15 /*1 extra ms for safety */ );
/* 7 */
msleep(1);
/* 8 */
/* Has PL4 analog block come out of reset correctly? */
t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the init sequence
* figure out why? */
/* Have all PL4 block clocks locked? */
x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
/*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
is_pl4_outof_lock = (val & x);
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
/* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
if (netif_msg_hw(adapter))
dev_dbg(&adapter->pdev->dev,
"PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
"is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
i, is_pl4_reset_finished, val,
is_pl4_outof_lock, is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}
const struct gmac t1_pm3393_ops = {
.stats_update_period = STATS_TICK_SECS,
.create = pm3393_mac_create,
.reset = pm3393_mac_reset,
};
| gpl-2.0 |
ptmr3/android_kernel_lge_g3 | drivers/net/ethernet/chelsio/cxgb/pm3393.c | 10569 | 30323 | /*****************************************************************************
* *
* File: pm3393.c *
* $Revision: 1.16 $ *
* $Date: 2005/05/14 00:59:32 $ *
* Description: *
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "common.h"
#include "regs.h"
#include "gmac.h"
#include "elmer0.h"
#include "suni1x10gexp_regs.h"
#include <linux/crc32.h>
#include <linux/slab.h>
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
#define MAX_FRAME_SIZE 9600
#define IPG 12
#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
SUNI1x10GEXP_BITMSK_TXXG_PADEN)
#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
/* Update statistics every 15 minutes */
#define STATS_TICK_SECS (15 * 60)
enum { /* RMON registers */
RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
};
struct _cmac_instance {
u8 enabled;
u8 fc;
u8 mac_addr[6];
};
static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
{
t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
return 0;
}
static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
{
t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
return 0;
}
/* Port reset. */
static int pm3393_reset(struct cmac *cmac)
{
return 0;
}
/*
* Enable interrupts for the PM3393
*
* 1. Enable PM3393 BLOCK interrupts.
* 2. Enable PM3393 Master Interrupt bit(INTE)
* 3. Enable ELMER's PM3393 bit.
* 4. Enable Terminator external interrupt.
*/
static int pm3393_interrupt_enable(struct cmac *cmac)
{
u32 pl_intr;
/* PM3393 - Enabling all hardware block interrupts.
*/
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
/* Don't interrupt on statistics overflow, we are polling */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
/* PM3393 - Global interrupt enable
*/
/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
/* TERMINATOR - PL_INTERUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
return 0;
}
static int pm3393_interrupt_disable(struct cmac *cmac)
{
u32 elmer;
/* PM3393 - Disabling HW interrupt blocks. */
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
/* PM3393 - Global interrupt enable */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
/* ELMER - External chip interrupts. */
t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
/* TERMINATOR - PL_INTERUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
return 0;
}
static int pm3393_interrupt_clear(struct cmac *cmac)
{
u32 elmer;
u32 pl_intr;
u32 val32;
/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
* bit WCIMODE=0 for a clear-on-read.
*/
pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
&val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
/* PM3393 - Global interrupt status
*/
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
/* ELMER - External chip interrupts.
*/
t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
/* TERMINATOR - PL_INTERUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
return 0;
}
/* Interrupt handler */
static int pm3393_interrupt_handler(struct cmac *cmac)
{
u32 master_intr_status;
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
if (netif_msg_intr(cmac->adapter))
dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
master_intr_status);
/* TBD XXX Let's just clear everything for now */
pm3393_interrupt_clear(cmac);
return 0;
}
static int pm3393_enable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
if (which & MAC_DIRECTION_TX) {
u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
if (cmac->instance->fc & PAUSE_RX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
if (cmac->instance->fc & PAUSE_TX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
}
cmac->instance->enabled |= which;
return 0;
}
static int pm3393_enable_port(struct cmac *cmac, int which)
{
/* Clear port statistics */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
udelay(2);
memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
pm3393_enable(cmac, which);
/*
* XXX This should be done by the PHY and preferably not at all.
* The PHY doesn't give us link status indication on its own so have
* the link management code query it instead.
*/
t1_link_changed(cmac->adapter, 0);
return 0;
}
static int pm3393_disable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
if (which & MAC_DIRECTION_TX)
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
/*
* The disable is graceful. Give the PM3393 time. Can't wait very
* long here, we may be holding locks.
*/
udelay(20);
cmac->instance->enabled &= ~which;
return 0;
}
static int pm3393_loopback_enable(struct cmac *cmac)
{
return 0;
}
static int pm3393_loopback_disable(struct cmac *cmac)
{
return 0;
}
static int pm3393_set_mtu(struct cmac *cmac, int mtu)
{
int enabled = cmac->instance->enabled;
/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
mtu += 14 + 4;
if (mtu > MAX_FRAME_SIZE)
return -EINVAL;
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
{
int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
u32 rx_mode;
/* Disable MAC RX before reconfiguring it */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
(u16)rx_mode);
if (t1_rx_mode_promisc(rm)) {
/* Promiscuous mode. */
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
}
if (t1_rx_mode_allmulti(rm)) {
/* Accept all multicast. */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
struct netdev_hw_addr *ha;
int bit;
u16 mc_filter[4] = { 0, };
netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
/* bit[23:28] */
bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
if (enabled)
pm3393_enable(cmac, MAC_DIRECTION_RX);
return 0;
}
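/*
 * Illustrative sketch (not part of the driver): how a single multicast
 * address is folded into the 64-bit hash spread across the four 16-bit
 * MULTICAST_HASH registers programmed above.  The 6-bit index comes from
 * bits [28:23] of the Ethernet CRC, exactly as in pm3393_set_rx_mode().
 */
static inline void pm3393_mc_hash_example(const u8 *addr, u16 mc_filter[4])
{
	int bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x3f;	/* 0..63 */

	mc_filter[bit >> 4] |= 1 << (bit & 0xf);	/* word 0..3, bit 0..15 */
}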
static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
int *duplex, int *fc)
{
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
if (fc)
*fc = cmac->instance->fc;
return 0;
}
static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
int fc)
{
if (speed >= 0 && speed != SPEED_10000)
return -1;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -1;
if (fc & ~(PAUSE_TX | PAUSE_RX))
return -1;
if (fc != cmac->instance->fc) {
cmac->instance->fc = (u8) fc;
if (cmac->instance->enabled & MAC_DIRECTION_TX)
pm3393_enable(cmac, MAC_DIRECTION_TX);
}
return 0;
}
#define RMON_UPDATE(mac, name, stat_name) \
{ \
t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
(mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
((u64)(val1 & 0xffff) << 16) | \
((u64)(val2 & 0xff) << 32) | \
((mac)->stats.stat_name & \
0xffffff0000000000ULL); \
if (ro & \
(1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
(mac)->stats.stat_name += 1ULL << 40; \
}
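/*
 * Illustrative sketch (not part of the driver): RMON_UPDATE() above merges a
 * 40-bit hardware MSTAT counter, read as three 16-bit TPI words (low, mid
 * and the high byte), into the 64-bit software counter while preserving the
 * software-maintained bits above bit 39.  A set rollover bit means the
 * hardware counter wrapped since the last snapshot, so 2^40 is added once.
 */
static inline u64 pm3393_merge_counter_example(u32 val0, u32 val1, u32 val2,
					       u64 sw_count, int rolled_over)
{
	u64 val = (u64)(val0 & 0xffff) |
		  ((u64)(val1 & 0xffff) << 16) |
		  ((u64)(val2 & 0xff) << 32) |
		  (sw_count & 0xffffff0000000000ULL);

	if (rolled_over)
		val += 1ULL << 40;
	return val;
}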
static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
int flag)
{
u64 ro;
u32 val0, val1, val2, val3;
/* Snap the counters */
pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
/* Counter rollover, clear on read */
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
/* Rx stats */
RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
RxInternalMACRcvError);
RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
RMON_UPDATE(mac, RxFragments, RxRuntErrors);
RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
/* Tx stats */
RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
TxInternalMACXmitError);
RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
return &mac->stats;
}
static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
memcpy(mac_addr, cmac->instance->mac_addr, 6);
return 0;
}
static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
/*
* MAC addr: 00:07:43:00:13:09
*
* ma[5] = 0x09
* ma[4] = 0x13
* ma[3] = 0x00
* ma[2] = 0x43
* ma[1] = 0x07
* ma[0] = 0x00
*
* The PM3393 requires byte swapping and reverse order entry
* when programming MAC addresses:
*
* low_bits[15:0] = ma[1]:ma[0]
* mid_bits[31:16] = ma[3]:ma[2]
* high_bits[47:32] = ma[5]:ma[4]
*/
/* Store local copy */
memcpy(cmac->instance->mac_addr, ma, 6);
lo = ((u32) ma[1] << 8) | (u32) ma[0];
mid = ((u32) ma[3] << 8) | (u32) ma[2];
hi = ((u32) ma[5] << 8) | (u32) ma[4];
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Set RXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
/* Set TXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
/* Setup Exact Match Filter 1 with our MAC address
*
* Must disable exact match filter before configuring it.
*/
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
val &= 0xff0f;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
val |= 0x0090;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
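/*
 * Illustrative sketch (not part of the driver): the register packing used by
 * pm3393_macaddress_set() above.  For MAC 00:07:43:00:13:09 (ma[0]..ma[5])
 * the three 16-bit station-address registers end up as lo = 0x0700,
 * mid = 0x0043 and hi = 0x0913.
 */
static inline void pm3393_pack_macaddr_example(const u8 ma[6],
					       u32 *lo, u32 *mid, u32 *hi)
{
	*lo  = ((u32)ma[1] << 8) | ma[0];	/* SA bits 15:0  */
	*mid = ((u32)ma[3] << 8) | ma[2];	/* SA bits 31:16 */
	*hi  = ((u32)ma[5] << 8) | ma[4];	/* SA bits 47:32 */
}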
static void pm3393_destroy(struct cmac *cmac)
{
kfree(cmac);
}
static struct cmac_ops pm3393_ops = {
.destroy = pm3393_destroy,
.reset = pm3393_reset,
.interrupt_enable = pm3393_interrupt_enable,
.interrupt_disable = pm3393_interrupt_disable,
.interrupt_clear = pm3393_interrupt_clear,
.interrupt_handler = pm3393_interrupt_handler,
.enable = pm3393_enable_port,
.disable = pm3393_disable,
.loopback_enable = pm3393_loopback_enable,
.loopback_disable = pm3393_loopback_disable,
.set_mtu = pm3393_set_mtu,
.set_rx_mode = pm3393_set_rx_mode,
.get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
.set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
.statistics_update = pm3393_update_statistics,
.macaddress_get = pm3393_macaddress_get,
.macaddress_set = pm3393_macaddress_set
};
static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
{
struct cmac *cmac;
cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
if (!cmac)
return NULL;
cmac->ops = &pm3393_ops;
cmac->instance = (cmac_instance *) (cmac + 1);
cmac->adapter = adapter;
cmac->instance->fc = PAUSE_TX | PAUSE_RX;
t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
/* For T1 use timer based Mac flow control. */
t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
/* Setup Exact Match Filter 0 to allow broadcast packets.
*/
t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
return cmac;
}
static int pm3393_mac_reset(adapter_t * adapter)
{
u32 val;
u32 x;
u32 is_pl4_reset_finished;
u32 is_pl4_outof_lock;
u32 is_xaui_mabc_pll_locked;
u32 successful_reset;
int i;
/* The following steps are required to properly reset
* the PM3393. This information is provided in the
* PM3393 datasheet (Issue 2: November 2002)
* section 13.1 -- Device Reset.
*
* The PM3393 has three types of components that are
* individually reset:
*
* DRESETB - Digital circuitry
* PL4_ARESETB - PL4 analog circuitry
* XAUI_ARESETB - XAUI bus analog circuitry
*
* Steps to reset PM3393 using RSTB pin:
*
* 1. Assert RSTB pin low ( write 0 )
* 2. Wait at least 1ms to initiate a complete initialization of the device.
* 3. Wait until all external clocks and REFSEL are stable.
* 4. Wait a minimum of 1ms (after external clocks and REFSEL are stable).
* 5. De-assert RSTB ( write 1 )
* 6. Wait for the internal timers to expire after ~14ms.
* - Allows analog clock synthesizer(PL4CSU) to stabilize to
* selected reference frequency before allowing the digital
* portion of the device to operate.
* 7. Wait at least 200us for XAUI interface to stabilize.
* 8. Verify the PM3393 came out of reset successfully.
* Set the successful-reset flag if everything worked, else try again
* a few more times.
*/
successful_reset = 0;
for (i = 0; i < 3 && !successful_reset; i++) {
/* 1 */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 2 */
msleep(1);
/* 3 */
msleep(1);
/* 4 */
msleep(2 /*1 extra ms for safety */ );
/* 5 */
val |= 1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 6 */
msleep(15 /*1 extra ms for safety */ );
/* 7 */
msleep(1);
/* 8 */
/* Has PL4 analog block come out of reset correctly? */
t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the init sequence
* figure out why? */
/* Have all PL4 block clocks locked? */
x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
/*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
is_pl4_outof_lock = (val & x);
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
/* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
if (netif_msg_hw(adapter))
dev_dbg(&adapter->pdev->dev,
"PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
"is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
i, is_pl4_reset_finished, val,
is_pl4_outof_lock, is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}
const struct gmac t1_pm3393_ops = {
.stats_update_period = STATS_TICK_SECS,
.create = pm3393_mac_create,
.reset = pm3393_mac_reset,
};
| gpl-2.0 |
AnguisCaptor/PwnKernel_Hammerhead | sound/isa/gus/gus_dram.c | 14921 | 2994 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* DRAM access routines
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/info.h>
static int snd_gus_dram_poke(struct snd_gus_card *gus, char __user *_buffer,
unsigned int address, unsigned int size)
{
unsigned long flags;
unsigned int size1, size2;
char buffer[256], *pbuffer;
while (size > 0) {
size1 = size > sizeof(buffer) ? sizeof(buffer) : size;
if (copy_from_user(buffer, _buffer, size1))
return -EFAULT;
if (gus->interwave) {
spin_lock_irqsave(&gus->reg_lock, flags);
snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01);
snd_gf1_dram_addr(gus, address);
outsb(GUSP(gus, DRAM), buffer, size1);
spin_unlock_irqrestore(&gus->reg_lock, flags);
address += size1;
} else {
pbuffer = buffer;
size2 = size1;
while (size2--)
snd_gf1_poke(gus, address++, *pbuffer++);
}
size -= size1;
_buffer += size1;
}
return 0;
}
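/*
 * Illustrative sketch (not part of ALSA): the bounce-buffer pattern used by
 * snd_gus_dram_poke() above.  User memory is copied through a small kernel
 * buffer in chunks so the per-byte device pokes never touch a user pointer.
 * 'poke_byte' is a hypothetical per-byte writer standing in for
 * snd_gf1_poke().
 */
static int bounce_poke_example(char __user *src, unsigned int addr,
			       unsigned int size,
			       void (*poke_byte)(unsigned int addr, char b))
{
	char buf[256];
	unsigned int chunk, i;

	while (size > 0) {
		chunk = size > sizeof(buf) ? sizeof(buf) : size;
		if (copy_from_user(buf, src, chunk))
			return -EFAULT;
		for (i = 0; i < chunk; i++)
			poke_byte(addr++, buf[i]);
		src += chunk;
		size -= chunk;
	}
	return 0;
}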
int snd_gus_dram_write(struct snd_gus_card *gus, char __user *buffer,
unsigned int address, unsigned int size)
{
return snd_gus_dram_poke(gus, buffer, address, size);
}
static int snd_gus_dram_peek(struct snd_gus_card *gus, char __user *_buffer,
unsigned int address, unsigned int size,
int rom)
{
unsigned long flags;
unsigned int size1, size2;
char buffer[256], *pbuffer;
while (size > 0) {
size1 = size > sizeof(buffer) ? sizeof(buffer) : size;
if (gus->interwave) {
spin_lock_irqsave(&gus->reg_lock, flags);
snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, rom ? 0x03 : 0x01);
snd_gf1_dram_addr(gus, address);
insb(GUSP(gus, DRAM), buffer, size1);
snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01);
spin_unlock_irqrestore(&gus->reg_lock, flags);
address += size1;
} else {
pbuffer = buffer;
size2 = size1;
while (size2--)
*pbuffer++ = snd_gf1_peek(gus, address++);
}
if (copy_to_user(_buffer, buffer, size1))
return -EFAULT;
size -= size1;
_buffer += size1;
}
return 0;
}
int snd_gus_dram_read(struct snd_gus_card *gus, char __user *buffer,
unsigned int address, unsigned int size,
int rom)
{
return snd_gus_dram_peek(gus, buffer, address, size, rom);
}
| gpl-2.0 |
louissobel/828-qemu-fork | hw/i2c/smbus.c | 74 | 8314 | /*
* QEMU SMBus device emulation.
*
* Copyright (c) 2007 CodeSourcery.
* Written by Paul Brook
*
* This code is licensed under the LGPL.
*/
/* TODO: Implement PEC. */
#include "hw/hw.h"
#include "hw/i2c/i2c.h"
#include "hw/i2c/smbus.h"
//#define DEBUG_SMBUS 1
#ifdef DEBUG_SMBUS
#define DPRINTF(fmt, ...) \
do { printf("smbus(%02x): " fmt , dev->i2c.address, ## __VA_ARGS__); } while (0)
#define BADF(fmt, ...) \
do { fprintf(stderr, "smbus: error: " fmt , ## __VA_ARGS__); exit(1);} while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#define BADF(fmt, ...) \
do { fprintf(stderr, "smbus: error: " fmt , ## __VA_ARGS__);} while (0)
#endif
enum {
SMBUS_IDLE,
SMBUS_WRITE_DATA,
SMBUS_RECV_BYTE,
SMBUS_READ_DATA,
SMBUS_DONE,
SMBUS_CONFUSED = -1
};
static void smbus_do_quick_cmd(SMBusDevice *dev, int recv)
{
SMBusDeviceClass *sc = SMBUS_DEVICE_GET_CLASS(dev);
DPRINTF("Quick Command %d\n", recv);
if (sc->quick_cmd) {
sc->quick_cmd(dev, recv);
}
}
static void smbus_do_write(SMBusDevice *dev)
{
SMBusDeviceClass *sc = SMBUS_DEVICE_GET_CLASS(dev);
if (dev->data_len == 0) {
smbus_do_quick_cmd(dev, 0);
} else if (dev->data_len == 1) {
DPRINTF("Send Byte\n");
if (sc->send_byte) {
sc->send_byte(dev, dev->data_buf[0]);
}
} else {
dev->command = dev->data_buf[0];
DPRINTF("Command %d len %d\n", dev->command, dev->data_len - 1);
if (sc->write_data) {
sc->write_data(dev, dev->command, dev->data_buf + 1,
dev->data_len - 1);
}
}
}
static void smbus_i2c_event(I2CSlave *s, enum i2c_event event)
{
SMBusDevice *dev = SMBUS_DEVICE(s);
switch (event) {
case I2C_START_SEND:
switch (dev->mode) {
case SMBUS_IDLE:
DPRINTF("Incoming data\n");
dev->mode = SMBUS_WRITE_DATA;
break;
default:
BADF("Unexpected send start condition in state %d\n", dev->mode);
dev->mode = SMBUS_CONFUSED;
break;
}
break;
case I2C_START_RECV:
switch (dev->mode) {
case SMBUS_IDLE:
DPRINTF("Read mode\n");
dev->mode = SMBUS_RECV_BYTE;
break;
case SMBUS_WRITE_DATA:
if (dev->data_len == 0) {
BADF("Read after write with no data\n");
dev->mode = SMBUS_CONFUSED;
} else {
if (dev->data_len > 1) {
smbus_do_write(dev);
} else {
dev->command = dev->data_buf[0];
DPRINTF("%02x: Command %d\n", dev->i2c.address,
dev->command);
}
DPRINTF("Read mode\n");
dev->data_len = 0;
dev->mode = SMBUS_READ_DATA;
}
break;
default:
BADF("Unexpected recv start condition in state %d\n", dev->mode);
dev->mode = SMBUS_CONFUSED;
break;
}
break;
case I2C_FINISH:
switch (dev->mode) {
case SMBUS_WRITE_DATA:
smbus_do_write(dev);
break;
case SMBUS_RECV_BYTE:
smbus_do_quick_cmd(dev, 1);
break;
case SMBUS_READ_DATA:
BADF("Unexpected stop during receive\n");
break;
default:
/* Nothing to do. */
break;
}
dev->mode = SMBUS_IDLE;
dev->data_len = 0;
break;
case I2C_NACK:
switch (dev->mode) {
case SMBUS_DONE:
/* Nothing to do. */
break;
case SMBUS_READ_DATA:
dev->mode = SMBUS_DONE;
break;
default:
BADF("Unexpected NACK in state %d\n", dev->mode);
dev->mode = SMBUS_CONFUSED;
break;
}
}
}
static int smbus_i2c_recv(I2CSlave *s)
{
SMBusDevice *dev = SMBUS_DEVICE(s);
SMBusDeviceClass *sc = SMBUS_DEVICE_GET_CLASS(dev);
int ret;
switch (dev->mode) {
case SMBUS_RECV_BYTE:
if (sc->receive_byte) {
ret = sc->receive_byte(dev);
} else {
ret = 0;
}
DPRINTF("Receive Byte %02x\n", ret);
dev->mode = SMBUS_DONE;
break;
case SMBUS_READ_DATA:
if (sc->read_data) {
ret = sc->read_data(dev, dev->command, dev->data_len);
dev->data_len++;
} else {
ret = 0;
}
DPRINTF("Read data %02x\n", ret);
break;
default:
BADF("Unexpected read in state %d\n", dev->mode);
dev->mode = SMBUS_CONFUSED;
ret = 0;
break;
}
return ret;
}
static int smbus_i2c_send(I2CSlave *s, uint8_t data)
{
SMBusDevice *dev = SMBUS_DEVICE(s);
switch (dev->mode) {
case SMBUS_WRITE_DATA:
DPRINTF("Write data %02x\n", data);
dev->data_buf[dev->data_len++] = data;
break;
default:
BADF("Unexpected write in state %d\n", dev->mode);
break;
}
return 0;
}
static int smbus_device_init(I2CSlave *i2c)
{
SMBusDevice *dev = SMBUS_DEVICE(i2c);
SMBusDeviceClass *sc = SMBUS_DEVICE_GET_CLASS(dev);
return sc->init(dev);
}
/* Master device commands. */
void smbus_quick_command(i2c_bus *bus, uint8_t addr, int read)
{
i2c_start_transfer(bus, addr, read);
i2c_end_transfer(bus);
}
uint8_t smbus_receive_byte(i2c_bus *bus, uint8_t addr)
{
uint8_t data;
i2c_start_transfer(bus, addr, 1);
data = i2c_recv(bus);
i2c_nack(bus);
i2c_end_transfer(bus);
return data;
}
void smbus_send_byte(i2c_bus *bus, uint8_t addr, uint8_t data)
{
i2c_start_transfer(bus, addr, 0);
i2c_send(bus, data);
i2c_end_transfer(bus);
}
uint8_t smbus_read_byte(i2c_bus *bus, uint8_t addr, uint8_t command)
{
uint8_t data;
i2c_start_transfer(bus, addr, 0);
i2c_send(bus, command);
i2c_start_transfer(bus, addr, 1);
data = i2c_recv(bus);
i2c_nack(bus);
i2c_end_transfer(bus);
return data;
}
void smbus_write_byte(i2c_bus *bus, uint8_t addr, uint8_t command, uint8_t data)
{
i2c_start_transfer(bus, addr, 0);
i2c_send(bus, command);
i2c_send(bus, data);
i2c_end_transfer(bus);
}
uint16_t smbus_read_word(i2c_bus *bus, uint8_t addr, uint8_t command)
{
uint16_t data;
i2c_start_transfer(bus, addr, 0);
i2c_send(bus, command);
i2c_start_transfer(bus, addr, 1);
data = i2c_recv(bus);
data |= i2c_recv(bus) << 8;
i2c_nack(bus);
i2c_end_transfer(bus);
return data;
}
void smbus_write_word(i2c_bus *bus, uint8_t addr, uint8_t command, uint16_t data)
{
i2c_start_transfer(bus, addr, 0);
i2c_send(bus, command);
i2c_send(bus, data & 0xff);
i2c_send(bus, data >> 8);
i2c_end_transfer(bus);
}
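/*
 * Illustrative usage sketch (not part of QEMU): a master-side caller driving
 * the helpers above.  The device address (0x50) and command byte (0x02) are
 * hypothetical.  As smbus_write_word()/smbus_read_word() show, word data is
 * transferred low byte first.
 */
static void smbus_word_roundtrip_example(i2c_bus *bus)
{
    uint8_t addr = 0x50;                      /* hypothetical device */
    uint8_t cmd = 0x02;                       /* hypothetical command */

    smbus_write_word(bus, addr, cmd, 0x1234); /* sends 0x34, then 0x12 */
    (void)smbus_read_word(bus, addr, cmd);    /* reads the word back */
}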
int smbus_read_block(i2c_bus *bus, uint8_t addr, uint8_t command, uint8_t *data)
{
int len;
int i;
i2c_start_transfer(bus, addr, 0);
i2c_send(bus, command);
i2c_start_transfer(bus, addr, 1);
len = i2c_recv(bus);
if (len > 32)
len = 0;
for (i = 0; i < len; i++)
data[i] = i2c_recv(bus);
i2c_nack(bus);
i2c_end_transfer(bus);
return len;
}
void smbus_write_block(i2c_bus *bus, uint8_t addr, uint8_t command, uint8_t *data,
int len)
{
int i;
if (len > 32)
len = 32;
i2c_start_transfer(bus, addr, 0);
i2c_send(bus, command);
i2c_send(bus, len);
for (i = 0; i < len; i++)
i2c_send(bus, data[i]);
i2c_end_transfer(bus);
}
static void smbus_device_class_init(ObjectClass *klass, void *data)
{
I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);
sc->init = smbus_device_init;
sc->event = smbus_i2c_event;
sc->recv = smbus_i2c_recv;
sc->send = smbus_i2c_send;
}
static const TypeInfo smbus_device_type_info = {
.name = TYPE_SMBUS_DEVICE,
.parent = TYPE_I2C_SLAVE,
.instance_size = sizeof(SMBusDevice),
.abstract = true,
.class_size = sizeof(SMBusDeviceClass),
.class_init = smbus_device_class_init,
};
static void smbus_device_register_types(void)
{
type_register_static(&smbus_device_type_info);
}
type_init(smbus_device_register_types)
| gpl-2.0 |
TeamEpsilon/linux-3.8 | arch/x86/kernel/aperture_64.c | 330 | 14274 | /*
* Firmware replacement code.
*
* Work around broken BIOSes that don't set an aperture, only set the
* aperture in the AGP bridge, or set too small an aperture.
*
* If all else fails, map the aperture over some low memory. This is cheaper than
* doing bounce buffering. The memory is lost. This is done at early boot
* because only the bootmem allocator can allocate 32+MB.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/suspend.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
/*
* Using 512M as the goal, in case kexec will load a kernel_big
* that does in-place decompression and could overlap with
* the gart aperture that is in use.
* Sequence:
* kernel_small
* ==> kexec (with kdump trigger path or gart still enabled)
* ==> kernel_small (gart area becomes e820_reserved)
* ==> kexec (with kdump trigger path or gart still enabled)
* ==> kernel_big (uncompressed size will be bigger than 64M or 128M)
* So don't use the space below 512M as the gart iommu; leave it for
* kernel code, to be safe.
*/
#define GART_MIN_ADDR (512ULL << 20)
#define GART_MAX_ADDR (1ULL << 32)
int gart_iommu_aperture;
int gart_iommu_aperture_disabled __initdata;
int gart_iommu_aperture_allowed __initdata;
int fallback_aper_order __initdata = 1; /* 64MB */
int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
static struct resource gart_resource = {
.name = "GART",
.flags = IORESOURCE_MEM,
};
static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
{
gart_resource.start = aper_base;
gart_resource.end = aper_base + aper_size - 1;
insert_resource(&iomem_resource, &gart_resource);
}
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
static u32 __init allocate_aperture(void)
{
u32 aper_size;
unsigned long addr;
/* aper_size should <= 1G */
if (fallback_aper_order > 5)
fallback_aper_order = 5;
aper_size = (32 * 1024 * 1024) << fallback_aper_order;
/*
* Aperture has to be naturally aligned. This means a 2GB aperture
* won't have much chance of finding a place in the lower 4GB of
* memory. Unfortunately we cannot move it up because that would
* make the IOMMU useless.
*/
addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
aper_size, aper_size);
if (!addr || addr + aper_size > GART_MAX_ADDR) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%lx,%uK)\n",
addr, aper_size>>10);
return 0;
}
memblock_reserve(addr, aper_size);
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, addr);
insert_aperture_resource((u32)addr, aper_size);
register_nosave_region(addr >> PAGE_SHIFT,
(addr+aper_size) >> PAGE_SHIFT);
return (u32)addr;
}
/* Find a PCI capability */
static u32 __init find_cap(int bus, int slot, int func, int cap)
{
int bytes;
u8 pos;
if (!(read_pci_config_16(bus, slot, func, PCI_STATUS) &
PCI_STATUS_CAP_LIST))
return 0;
pos = read_pci_config_byte(bus, slot, func, PCI_CAPABILITY_LIST);
for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
u8 id;
pos &= ~3;
id = read_pci_config_byte(bus, slot, func, pos+PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = read_pci_config_byte(bus, slot, func,
pos+PCI_CAP_LIST_NEXT);
}
return 0;
}
/* Read a standard AGPv3 bridge header */
static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
{
u32 apsize;
u32 apsizereg;
int nbits;
u32 aper_low, aper_hi;
u64 aper;
u32 old_order;
printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", bus, slot, func);
apsizereg = read_pci_config_16(bus, slot, func, cap + 0x14);
if (apsizereg == 0xffffffff) {
printk(KERN_ERR "APSIZE in AGP bridge unreadable\n");
return 0;
}
/* old_order could be the value from NB gart setting */
old_order = *order;
apsize = apsizereg & 0xfff;
/* Some BIOSes use weird encodings not in the AGPv3 table. */
if (apsize & 0xff)
apsize |= 0xf00;
nbits = hweight16(apsize);
*order = 7 - nbits;
if ((int)*order < 0) /* < 32MB */
*order = 0;
aper_low = read_pci_config(bus, slot, func, 0x10);
aper_hi = read_pci_config(bus, slot, func, 0x14);
aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
/*
* On some sick chips, APSIZE is 0. This means the chip wants 4G,
* so double-check that order, and trust the AMD NB settings:
*/
printk(KERN_INFO "Aperture from AGP @ %Lx old size %u MB\n",
aper, 32 << old_order);
if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) {
printk(KERN_INFO "Aperture size %u MB (APSIZE %x) is not right, using settings from NB\n",
32 << *order, apsizereg);
*order = old_order;
}
printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
aper, 32 << *order, apsizereg);
if (!aperture_valid(aper, (32*1024*1024) << *order, 32<<20))
return 0;
return (u32)aper;
}
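/*
 * Illustrative sketch (not part of the kernel): the APSIZE decoding done in
 * read_agp() above.  AGPv3 encodes the size as a field of set bits, so the
 * order is 7 minus the popcount and the aperture is 32MB << order
 * (e.g. 0xf00 -> 4 bits -> order 3 -> 256MB; 0x000 -> order 7 -> 4GB).
 */
static inline u32 agp_apsize_to_mb_example(u32 apsizereg)
{
	u32 apsize = apsizereg & 0xfff;
	int order;

	if (apsize & 0xff)		/* same fixup as read_agp() */
		apsize |= 0xf00;
	order = 7 - hweight16(apsize);
	if (order < 0)			/* < 32MB */
		order = 0;
	return 32U << order;		/* size in MB */
}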
/*
* Look for an AGP bridge. Windows only expects the aperture in the
* AGP bridge and some BIOSes forget to initialize the Northbridge too.
* Work around this here.
*
* Do a PCI bus scan by hand because we're running before the PCI
* subsystem.
*
* All AMD AGP bridges are AGPv3 compliant, so we can do this scan
* generically. It's probably overkill to always scan all slots because
* the AGP bridges should always be on their own bus in the HT hierarchy,
* but do it here for future safety.
*/
static u32 __init search_agp_bridge(u32 *order, int *valid_agp)
{
int bus, slot, func;
/* Poor man's PCI discovery */
for (bus = 0; bus < 256; bus++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
u32 class, cap;
u8 type;
class = read_pci_config(bus, slot, func,
PCI_CLASS_REVISION);
if (class == 0xffffffff)
break;
switch (class >> 16) {
case PCI_CLASS_BRIDGE_HOST:
case PCI_CLASS_BRIDGE_OTHER: /* needed? */
/* AGP bridge? */
cap = find_cap(bus, slot, func,
PCI_CAP_ID_AGP);
if (!cap)
break;
*valid_agp = 1;
return read_agp(bus, slot, func, cap,
order);
}
/* No multi-function device? */
type = read_pci_config_byte(bus, slot, func,
PCI_HEADER_TYPE);
if (!(type & 0x80))
break;
}
}
}
printk(KERN_INFO "No AGP bridge found\n");
return 0;
}
static int gart_fix_e820 __initdata = 1;
static int __init parse_gart_mem(char *p)
{
if (!p)
return -EINVAL;
if (!strncmp(p, "off", 3))
gart_fix_e820 = 0;
else if (!strncmp(p, "on", 2))
gart_fix_e820 = 1;
return 0;
}
early_param("gart_fix_e820", parse_gart_mem);
void __init early_gart_iommu_check(void)
{
/*
* In case the GART was enabled before, especially for kexec/kdump,
* the previous kernel may have left it enabled.  A memset called
* by allocate_aperture/__alloc_bootmem_nopanic would then cause a restart,
* or the second kernel may place the GART hole elsewhere, and the new
* kernel could use as RAM a hole that is still used by the GART set up by
* the first kernel, or the BIOS may have forgotten to mark that region
* reserved.
* Try to update e820 to mark that region as reserved.
*/
u32 agp_aper_order = 0;
int i, fix, slot, valid_agp = 0;
u32 ctl;
u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
u64 aper_base = 0, last_aper_base = 0;
int aper_enabled = 0, last_aper_enabled = 0, last_valid = 0;
if (!early_pci_allowed())
return;
/* This is mostly duplicate of iommu_hole_init */
search_agp_bridge(&agp_aper_order, &valid_agp);
fix = 0;
for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus;
int dev_base, dev_limit;
bus = amd_nb_bus_dev_ranges[i].bus;
dev_base = amd_nb_bus_dev_ranges[i].dev_base;
dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;
ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
aper_enabled = ctl & GARTEN;
aper_order = (ctl >> 1) & 7;
aper_size = (32 * 1024 * 1024) << aper_order;
aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
aper_base <<= 25;
if (last_valid) {
if ((aper_order != last_aper_order) ||
(aper_base != last_aper_base) ||
(aper_enabled != last_aper_enabled)) {
fix = 1;
break;
}
}
last_aper_order = aper_order;
last_aper_base = aper_base;
last_aper_enabled = aper_enabled;
last_valid = 1;
}
}
if (!fix && !aper_enabled)
return;
if (!aper_base || !aper_size || aper_base + aper_size > 0x100000000UL)
fix = 1;
if (gart_fix_e820 && !fix && aper_enabled) {
if (e820_any_mapped(aper_base, aper_base + aper_size,
E820_RAM)) {
/* reserve it, so we can reuse it in second kernel */
printk(KERN_INFO "update e820 for GART\n");
e820_add_region(aper_base, aper_size, E820_RESERVED);
update_e820();
}
}
if (valid_agp)
return;
/* disable them all at first */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus;
int dev_base, dev_limit;
bus = amd_nb_bus_dev_ranges[i].bus;
dev_base = amd_nb_bus_dev_ranges[i].dev_base;
dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;
ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
ctl &= ~GARTEN;
write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
}
}
}
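/*
 * Illustrative sketch (not part of the kernel): how the northbridge GART
 * registers are decoded in the functions above and below.
 * AMD64_GARTAPERTURECTL carries the enable bit (GARTEN) and the order in
 * bits [3:1]; AMD64_GARTAPERTUREBASE holds the base in 32MB units, hence
 * the shift by 25.
 */
static inline void gart_decode_example(u32 ctl, u32 base_reg,
					u64 *aper_base, u32 *aper_size,
					int *enabled)
{
	u32 order = (ctl >> 1) & 7;

	*enabled   = !!(ctl & GARTEN);
	*aper_size = (32 * 1024 * 1024) << order;
	*aper_base = ((u64)(base_reg & 0x7fff)) << 25;
}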
static int __initdata printed_gart_size_msg;
int __init gart_iommu_hole_init(void)
{
u32 agp_aper_base = 0, agp_aper_order = 0;
u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0;
u64 aper_base, last_aper_base = 0;
int fix, slot, valid_agp = 0;
int i, node;
if (gart_iommu_aperture_disabled || !fix_aperture ||
!early_pci_allowed())
return -ENODEV;
printk(KERN_INFO "Checking aperture...\n");
if (!fallback_aper_force)
agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
fix = 0;
node = 0;
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus;
int dev_base, dev_limit;
u32 ctl;
bus = amd_nb_bus_dev_ranges[i].bus;
dev_base = amd_nb_bus_dev_ranges[i].dev_base;
dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;
iommu_detected = 1;
gart_iommu_aperture = 1;
x86_init.iommu.iommu_init = gart_iommu_init;
ctl = read_pci_config(bus, slot, 3,
AMD64_GARTAPERTURECTL);
/*
* Before we do anything else disable the GART. It may
* still be enabled if we boot into a crash-kernel here.
* Reconfiguring the GART while it is enabled could have
* unknown side-effects.
*/
ctl &= ~GARTEN;
write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
aper_order = (ctl >> 1) & 7;
aper_size = (32 * 1024 * 1024) << aper_order;
aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
aper_base <<= 25;
printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n",
node, aper_base, aper_size >> 20);
node++;
if (!aperture_valid(aper_base, aper_size, 64<<20)) {
if (valid_agp && agp_aper_base &&
agp_aper_base == aper_base &&
agp_aper_order == aper_order) {
/* the same between two setting from NB and agp */
if (!no_iommu &&
max_pfn > MAX_DMA32_PFN &&
!printed_gart_size_msg) {
printk(KERN_ERR "you are using iommu with agp, but GART size is less than 64M\n");
printk(KERN_ERR "please increase GART size in your BIOS setup\n");
printk(KERN_ERR "if BIOS doesn't have that option, contact your HW vendor!\n");
printed_gart_size_msg = 1;
}
} else {
fix = 1;
goto out;
}
}
if ((last_aper_order && aper_order != last_aper_order) ||
(last_aper_base && aper_base != last_aper_base)) {
fix = 1;
goto out;
}
last_aper_order = aper_order;
last_aper_base = aper_base;
}
}
out:
if (!fix && !fallback_aper_force) {
if (last_aper_base) {
unsigned long n = (32 * 1024 * 1024) << last_aper_order;
insert_aperture_resource((u32)last_aper_base, n);
return 1;
}
return 0;
}
if (!fallback_aper_force) {
aper_alloc = agp_aper_base;
aper_order = agp_aper_order;
}
if (aper_alloc) {
/* Got the aperture from the AGP bridge */
} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||
fallback_aper_force) {
printk(KERN_INFO
"Your BIOS doesn't leave a aperture memory hole\n");
printk(KERN_INFO
"Please enable the IOMMU option in the BIOS setup\n");
printk(KERN_INFO
"This costs you %d MB of RAM\n",
32 << fallback_aper_order);
aper_order = fallback_aper_order;
aper_alloc = allocate_aperture();
if (!aper_alloc) {
/*
* Could disable AGP and IOMMU here, but it's
* probably not worth it. But the later users
* cannot deal with bad apertures and turning
* on the aperture over memory causes very
* strange problems, so it's better to panic
* early.
*/
panic("Not enough memory for aperture");
}
} else {
return 0;
}
/* Fix up the north bridges */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus, dev_base, dev_limit;
/*
* Don't enable translation yet but enable GART IO and CPU
* accesses and set DISTLBWALKPRB since GART table memory is UC.
*/
u32 ctl = aper_order << 1;
bus = amd_nb_bus_dev_ranges[i].bus;
dev_base = amd_nb_bus_dev_ranges[i].dev_base;
dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
for (slot = dev_base; slot < dev_limit; slot++) {
if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;
write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25);
}
}
set_up_gart_resume(aper_order, aper_alloc);
return 1;
}
| gpl-2.0 |
tgraf/net-next | net/irda/ircomm/ircomm_tty_attach.c | 330 | 27496 | /*********************************************************************
*
* Filename: ircomm_tty_attach.c
* Version:
* Description: Code for attaching the serial driver to IrCOMM
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Jun 5 17:42:00 1999
* Modified at: Tue Jan 4 14:20:49 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
#include <linux/init.h>
#include <linux/sched.h>
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/iriap.h>
#include <net/irda/irttp.h>
#include <net/irda/irias_object.h>
#include <net/irda/parameters.h>
#include <net/irda/ircomm_core.h>
#include <net/irda/ircomm_param.h>
#include <net/irda/ircomm_event.h>
#include <net/irda/ircomm_tty.h>
#include <net/irda/ircomm_tty_attach.h>
static void ircomm_tty_ias_register(struct ircomm_tty_cb *self);
static void ircomm_tty_discovery_indication(discinfo_t *discovery,
DISCOVERY_MODE mode,
void *priv);
static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
struct ias_value *value, void *priv);
static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
int timeout);
static void ircomm_tty_watchdog_timer_expired(void *data);
static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info);
const char *const ircomm_tty_state[] = {
"IRCOMM_TTY_IDLE",
"IRCOMM_TTY_SEARCH",
"IRCOMM_TTY_QUERY_PARAMETERS",
"IRCOMM_TTY_QUERY_LSAP_SEL",
"IRCOMM_TTY_SETUP",
"IRCOMM_TTY_READY",
"*** ERROR *** ",
};
static const char *const ircomm_tty_event[] __maybe_unused = {
"IRCOMM_TTY_ATTACH_CABLE",
"IRCOMM_TTY_DETACH_CABLE",
"IRCOMM_TTY_DATA_REQUEST",
"IRCOMM_TTY_DATA_INDICATION",
"IRCOMM_TTY_DISCOVERY_REQUEST",
"IRCOMM_TTY_DISCOVERY_INDICATION",
"IRCOMM_TTY_CONNECT_CONFIRM",
"IRCOMM_TTY_CONNECT_INDICATION",
"IRCOMM_TTY_DISCONNECT_REQUEST",
"IRCOMM_TTY_DISCONNECT_INDICATION",
"IRCOMM_TTY_WD_TIMER_EXPIRED",
"IRCOMM_TTY_GOT_PARAMETERS",
"IRCOMM_TTY_GOT_LSAPSEL",
"*** ERROR ****",
};
static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
struct sk_buff *skb, struct ircomm_tty_info *info) =
{
ircomm_tty_state_idle,
ircomm_tty_state_search,
ircomm_tty_state_query_parameters,
ircomm_tty_state_query_lsap_sel,
ircomm_tty_state_setup,
ircomm_tty_state_ready,
};
/*
* Function ircomm_tty_attach_cable (driver)
*
* Try to attach cable (IrCOMM link). This function will only return
* when the link has been connected, or if an error condition occurs.
* If success, the return value is the resulting service type.
*/
int ircomm_tty_attach_cable(struct ircomm_tty_cb *self)
{
struct tty_struct *tty;
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
/* Check if somebody has already connected to us */
if (ircomm_is_connected(self->ircomm)) {
pr_debug("%s(), already connected!\n", __func__);
return 0;
}
/* Make sure nobody tries to write before the link is up */
tty = tty_port_tty_get(&self->port);
if (tty) {
tty->hw_stopped = 1;
tty_kref_put(tty);
}
ircomm_tty_ias_register(self);
ircomm_tty_do_event(self, IRCOMM_TTY_ATTACH_CABLE, NULL, NULL);
return 0;
}
/*
* Function ircomm_detach_cable (driver)
*
* Detach cable, or cable has been detached by peer
*
*/
void ircomm_tty_detach_cable(struct ircomm_tty_cb *self)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
del_timer(&self->watchdog_timer);
/* Remove discovery handler */
if (self->ckey) {
irlmp_unregister_client(self->ckey);
self->ckey = NULL;
}
/* Remove IrCOMM hint bits */
if (self->skey) {
irlmp_unregister_service(self->skey);
self->skey = NULL;
}
if (self->iriap) {
iriap_close(self->iriap);
self->iriap = NULL;
}
/* Remove LM-IAS object */
if (self->obj) {
irias_delete_object(self->obj);
self->obj = NULL;
}
ircomm_tty_do_event(self, IRCOMM_TTY_DETACH_CABLE, NULL, NULL);
/* Reset some values */
self->daddr = self->saddr = 0;
self->dlsap_sel = self->slsap_sel = 0;
memset(&self->settings, 0, sizeof(struct ircomm_params));
}
/*
* Function ircomm_tty_ias_register (self)
*
* Register with LM-IAS depending on which service type we are
*
*/
static void ircomm_tty_ias_register(struct ircomm_tty_cb *self)
{
__u8 oct_seq[6];
__u16 hints;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
/* Compute hint bits based on service */
hints = irlmp_service_to_hint(S_COMM);
if (self->service_type & IRCOMM_3_WIRE_RAW)
hints |= irlmp_service_to_hint(S_PRINTER);
/* Advertise IrCOMM hint bit in discovery */
if (!self->skey)
self->skey = irlmp_register_service(hints);
/* Set up a discovery handler */
if (!self->ckey)
self->ckey = irlmp_register_client(hints,
ircomm_tty_discovery_indication,
NULL, (void *) self);
/* If already done, no need to do it again */
if (self->obj)
return;
if (self->service_type & IRCOMM_3_WIRE_RAW) {
/* Register IrLPT with LM-IAS */
self->obj = irias_new_object("IrLPT", IAS_IRLPT_ID);
irias_add_integer_attrib(self->obj, "IrDA:IrLMP:LsapSel",
self->slsap_sel, IAS_KERNEL_ATTR);
} else {
/* Register IrCOMM with LM-IAS */
self->obj = irias_new_object("IrDA:IrCOMM", IAS_IRCOMM_ID);
irias_add_integer_attrib(self->obj, "IrDA:TinyTP:LsapSel",
self->slsap_sel, IAS_KERNEL_ATTR);
/* Code the parameters into the buffer */
irda_param_pack(oct_seq, "bbbbbb",
IRCOMM_SERVICE_TYPE, 1, self->service_type,
IRCOMM_PORT_TYPE, 1, IRCOMM_SERIAL);
/* Register parameters with LM-IAS */
irias_add_octseq_attrib(self->obj, "Parameters", oct_seq, 6,
IAS_KERNEL_ATTR);
}
irias_insert_object(self->obj);
}
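/*
 * Illustrative sketch (not part of the stack): the 6-byte "Parameters"
 * octet sequence built by the irda_param_pack() call above is simply two
 * PI/PL/PV byte triplets laid out in argument order.
 */
static inline void ircomm_build_params_example(__u8 oct_seq[6], __u8 service)
{
	oct_seq[0] = IRCOMM_SERVICE_TYPE;	/* PI  */
	oct_seq[1] = 1;				/* PL: one value byte */
	oct_seq[2] = service;			/* PV  */
	oct_seq[3] = IRCOMM_PORT_TYPE;		/* PI  */
	oct_seq[4] = 1;				/* PL  */
	oct_seq[5] = IRCOMM_SERIAL;		/* PV  */
}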
/*
* Function ircomm_tty_ias_unregister (self)
*
* Remove our IAS object and client hook while connected.
*
*/
static void ircomm_tty_ias_unregister(struct ircomm_tty_cb *self)
{
/* Remove LM-IAS object now so it is not reused.
* IrCOMM deals very poorly with multiple incoming connections.
* It should look a lot more like IrNET, and "dup" a server TSAP
* to the application TSAP (based on various rules).
* This is a cheap workaround allowing multiple clients to
* connect to us. It will not always work.
* Each IrCOMM socket has an IAS entry. Incoming connection will
* pick the first one found. So, when we are fully connected,
* we remove our IAS entries so that the next IAS entry is used.
* We do that for *both* client and server, because a server
* can also create client instances.
* Jean II */
if (self->obj) {
irias_delete_object(self->obj);
self->obj = NULL;
}
#if 0
/* Remove discovery handler.
* While we are connected, we no longer need to receive
* discovery events. This would be the case if there is
* multiple IrLAP interfaces. Jean II */
if (self->ckey) {
irlmp_unregister_client(self->ckey);
self->ckey = NULL;
}
#endif
}
/*
* Function ircomm_send_initial_parameters (self)
*
* Send initial parameters to the remote IrCOMM device. These parameters
* must be sent before any data.
*/
int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self)
{
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
if (self->service_type & IRCOMM_3_WIRE_RAW)
return 0;
/*
* Set default values, but only if the application for some reason
* hasn't set them already
*/
pr_debug("%s(), data-rate = %d\n", __func__ ,
self->settings.data_rate);
if (!self->settings.data_rate)
self->settings.data_rate = 9600;
pr_debug("%s(), data-format = %d\n", __func__ ,
self->settings.data_format);
if (!self->settings.data_format)
self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */
pr_debug("%s(), flow-control = %d\n", __func__ ,
self->settings.flow_control);
/*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/
/* Do not set delta values for the initial parameters */
self->settings.dte = IRCOMM_DTR | IRCOMM_RTS;
/* Only send service type parameter when we are the client */
if (self->client)
ircomm_param_request(self, IRCOMM_SERVICE_TYPE, FALSE);
ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);
ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
/* For a 3 wire service, we just flush the last parameter and return */
if (self->settings.service_type == IRCOMM_3_WIRE) {
ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
return 0;
}
/* Only 9-wire service types continue here */
ircomm_param_request(self, IRCOMM_FLOW_CONTROL, FALSE);
#if 0
ircomm_param_request(self, IRCOMM_XON_XOFF, FALSE);
ircomm_param_request(self, IRCOMM_ENQ_ACK, FALSE);
#endif
/* Notify peer that we are ready to receive data */
ircomm_param_request(self, IRCOMM_DTE, TRUE);
return 0;
}
/*
* Function ircomm_tty_discovery_indication (discovery)
*
* Remote device is discovered, try query the remote IAS to see which
* device it is, and which services it has.
*
*/
static void ircomm_tty_discovery_indication(discinfo_t *discovery,
DISCOVERY_MODE mode,
void *priv)
{
struct ircomm_tty_cb *self;
struct ircomm_tty_info info;
/* Important note :
* We need to drop all passive discoveries.
* The LSAP management of IrComm is deficient and doesn't deal
* with the case of two instances connecting to each other
* simultaneously (it will deadlock in LMP).
* The proper fix would be to use the same technique as in IrNET,
* to have one server socket and separate instances for the
* connecting/connected socket.
* The workaround is to drop passive discovery, which drastically
* reduce the probability of this happening.
* Jean II */
if (mode == DISCOVERY_PASSIVE)
return;
info.daddr = discovery->daddr;
info.saddr = discovery->saddr;
self = priv;
ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION,
NULL, &info);
}
/*
* Function ircomm_tty_disconnect_indication (instance, sap, reason, skb)
*
* Link disconnected
*
*/
void ircomm_tty_disconnect_indication(void *instance, void *sap,
LM_REASON reason,
struct sk_buff *skb)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
struct tty_struct *tty;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
tty = tty_port_tty_get(&self->port);
if (!tty)
return;
/* This will stop control data transfers */
self->flow = FLOW_STOP;
/* Stop data transfers */
tty->hw_stopped = 1;
ircomm_tty_do_event(self, IRCOMM_TTY_DISCONNECT_INDICATION, NULL,
NULL);
tty_kref_put(tty);
}
/*
* Function ircomm_tty_getvalue_confirm (result, obj_id, value, priv)
*
* Got result from the IAS query we make
*
*/
static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
struct ias_value *value,
void *priv)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
/* We probably don't need to make any more queries */
iriap_close(self->iriap);
self->iriap = NULL;
/* Check if request succeeded */
if (result != IAS_SUCCESS) {
pr_debug("%s(), got NULL value!\n", __func__);
return;
}
switch (value->type) {
case IAS_OCT_SEQ:
pr_debug("%s(), got octet sequence\n", __func__);
irda_param_extract_all(self, value->t.oct_seq, value->len,
&ircomm_param_info);
ircomm_tty_do_event(self, IRCOMM_TTY_GOT_PARAMETERS, NULL,
NULL);
break;
case IAS_INTEGER:
/* Got LSAP selector */
pr_debug("%s(), got lsapsel = %d\n", __func__ ,
value->t.integer);
if (value->t.integer == -1) {
pr_debug("%s(), invalid value!\n", __func__);
} else
self->dlsap_sel = value->t.integer;
ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL);
break;
case IAS_MISSING:
pr_debug("%s(), got IAS_MISSING\n", __func__);
break;
default:
pr_debug("%s(), got unknown type!\n", __func__);
break;
}
irias_delete_value(value);
}
/*
* Function ircomm_tty_connect_confirm (instance, sap, qos, max_sdu_size, skb)
*
* Connection confirmed
*
*/
void ircomm_tty_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_data_size,
__u8 max_header_size,
struct sk_buff *skb)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
self->client = TRUE;
self->max_data_size = max_data_size;
self->max_header_size = max_header_size;
self->flow = FLOW_START;
ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_CONFIRM, NULL, NULL);
/* No need to kfree_skb - see ircomm_ttp_connect_confirm() */
}
/*
* Function ircomm_tty_connect_indication (instance, sap, qos, max_sdu_size,
* skb)
*
* We have been discovered and are being requested to connect by the remote device!
*
*/
void ircomm_tty_connect_indication(void *instance, void *sap,
struct qos_info *qos,
__u32 max_data_size,
__u8 max_header_size,
struct sk_buff *skb)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
int clen;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
self->client = FALSE;
self->max_data_size = max_data_size;
self->max_header_size = max_header_size;
self->flow = FLOW_START;
clen = skb->data[0];
if (clen)
irda_param_extract_all(self, skb->data+1,
IRDA_MIN(skb->len, clen),
&ircomm_param_info);
ircomm_tty_do_event(self, IRCOMM_TTY_CONNECT_INDICATION, NULL, NULL);
/* No need to kfree_skb - see ircomm_ttp_connect_indication() */
}
/*
* Function ircomm_tty_link_established (self)
*
* Called when the IrCOMM link is established
*
*/
void ircomm_tty_link_established(struct ircomm_tty_cb *self)
{
struct tty_struct *tty;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
tty = tty_port_tty_get(&self->port);
if (!tty)
return;
del_timer(&self->watchdog_timer);
/*
* IrCOMM link is now up, and if we are not using hardware
* flow-control, then declare the hardware as running. Otherwise we
* will have to wait for the peer device (DCE) to raise the CTS
* line.
*/
if (tty_port_cts_enabled(&self->port) &&
((self->settings.dce & IRCOMM_CTS) == 0)) {
pr_debug("%s(), waiting for CTS ...\n", __func__);
goto put;
} else {
pr_debug("%s(), starting hardware!\n", __func__);
tty->hw_stopped = 0;
/* Wake up processes blocked on open */
wake_up_interruptible(&self->port.open_wait);
}
schedule_work(&self->tqueue);
put:
tty_kref_put(tty);
}
/*
* Function ircomm_tty_start_watchdog_timer (self, timeout)
*
* Start the watchdog timer. This timer is used to make sure that any
* connection attempt is successful, and if not, we will retry after
* the timeout
*/
static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
int timeout)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
ircomm_tty_watchdog_timer_expired);
}
/*
* Function ircomm_tty_watchdog_timer_expired (data)
*
* Called when the connect procedure has taken too much time.
*
*/
static void ircomm_tty_watchdog_timer_expired(void *data)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
ircomm_tty_do_event(self, IRCOMM_TTY_WD_TIMER_EXPIRED, NULL, NULL);
}
/*
* Function ircomm_tty_do_event (self, event, skb)
*
* Process event
*
*/
int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
struct sk_buff *skb, struct ircomm_tty_info *info)
{
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
pr_debug("%s: state=%s, event=%s\n", __func__ ,
ircomm_tty_state[self->state], ircomm_tty_event[event]);
return (*state[self->state])(self, event, skb, info);
}
/*
* Function ircomm_tty_next_state (self, state)
*
* Switch state
*
*/
static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_STATE state)
{
/*
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
pr_debug("%s: next state=%s, service type=%d\n", __func__ ,
ircomm_tty_state[self->state], self->service_type);
*/
self->state = state;
}
/*
* Function ircomm_tty_state_idle (self, event, skb, info)
*
* Just hanging around
*
*/
static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info)
{
int ret = 0;
pr_debug("%s: state=%s, event=%s\n", __func__ ,
ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_ATTACH_CABLE:
/* Try to discover any remote devices */
ircomm_tty_start_watchdog_timer(self, 3*HZ);
ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
break;
case IRCOMM_TTY_DISCOVERY_INDICATION:
self->daddr = info->daddr;
self->saddr = info->saddr;
if (self->iriap) {
net_warn_ratelimited("%s(), busy with a previous query\n",
__func__);
return -EBUSY;
}
self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
ircomm_tty_getvalue_confirm);
iriap_getvaluebyclass_request(self->iriap,
self->saddr, self->daddr,
"IrDA:IrCOMM", "Parameters");
ircomm_tty_start_watchdog_timer(self, 3*HZ);
ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
break;
case IRCOMM_TTY_CONNECT_INDICATION:
del_timer(&self->watchdog_timer);
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
/* Just stay idle */
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
pr_debug("%s(), unknown event: %s\n", __func__ ,
ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
}
/*
* Function ircomm_tty_state_search (self, event, skb, info)
*
* Trying to discover an IrCOMM device
*
*/
static int ircomm_tty_state_search(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info)
{
int ret = 0;
pr_debug("%s: state=%s, event=%s\n", __func__ ,
ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_DISCOVERY_INDICATION:
self->daddr = info->daddr;
self->saddr = info->saddr;
if (self->iriap) {
net_warn_ratelimited("%s(), busy with a previous query\n",
__func__);
return -EBUSY;
}
self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
ircomm_tty_getvalue_confirm);
if (self->service_type == IRCOMM_3_WIRE_RAW) {
iriap_getvaluebyclass_request(self->iriap, self->saddr,
self->daddr, "IrLPT",
"IrDA:IrLMP:LsapSel");
ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL);
} else {
iriap_getvaluebyclass_request(self->iriap, self->saddr,
self->daddr,
"IrDA:IrCOMM",
"Parameters");
ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_PARAMETERS);
}
ircomm_tty_start_watchdog_timer(self, 3*HZ);
break;
case IRCOMM_TTY_CONNECT_INDICATION:
del_timer(&self->watchdog_timer);
ircomm_tty_ias_unregister(self);
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
#if 1
/* Give up */
#else
/* Try to discover any remote devices */
ircomm_tty_start_watchdog_timer(self, 3*HZ);
irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
#endif
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
pr_debug("%s(), unknown event: %s\n", __func__ ,
ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
}
/*
* Function ircomm_tty_state_query_parameters (self, event, skb, info)
*
* Querying the remote LM-IAS for IrCOMM parameters
*
*/
static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info)
{
int ret = 0;
pr_debug("%s: state=%s, event=%s\n", __func__ ,
ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_GOT_PARAMETERS:
if (self->iriap) {
net_warn_ratelimited("%s(), busy with a previous query\n",
__func__);
return -EBUSY;
}
self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self,
ircomm_tty_getvalue_confirm);
iriap_getvaluebyclass_request(self->iriap, self->saddr,
self->daddr, "IrDA:IrCOMM",
"IrDA:TinyTP:LsapSel");
ircomm_tty_start_watchdog_timer(self, 3*HZ);
ircomm_tty_next_state(self, IRCOMM_TTY_QUERY_LSAP_SEL);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
/* Go back to search mode */
ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
ircomm_tty_start_watchdog_timer(self, 3*HZ);
break;
case IRCOMM_TTY_CONNECT_INDICATION:
del_timer(&self->watchdog_timer);
ircomm_tty_ias_unregister(self);
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
pr_debug("%s(), unknown event: %s\n", __func__ ,
ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
}
/*
* Function ircomm_tty_state_query_lsap_sel (self, event, skb, info)
*
* Query remote LM-IAS for the LSAP selector which we can connect to
*
*/
static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info)
{
int ret = 0;
pr_debug("%s: state=%s, event=%s\n", __func__ ,
ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_GOT_LSAPSEL:
/* Connect to remote device */
ret = ircomm_connect_request(self->ircomm, self->dlsap_sel,
self->saddr, self->daddr,
NULL, self->service_type);
ircomm_tty_start_watchdog_timer(self, 3*HZ);
ircomm_tty_next_state(self, IRCOMM_TTY_SETUP);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
/* Go back to search mode */
ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
ircomm_tty_start_watchdog_timer(self, 3*HZ);
break;
case IRCOMM_TTY_CONNECT_INDICATION:
del_timer(&self->watchdog_timer);
ircomm_tty_ias_unregister(self);
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
pr_debug("%s(), unknown event: %s\n", __func__ ,
ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
}
/*
* Function ircomm_tty_state_setup (self, event, skb, info)
*
* Trying to connect
*
*/
static int ircomm_tty_state_setup(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info)
{
int ret = 0;
pr_debug("%s: state=%s, event=%s\n", __func__ ,
ircomm_tty_state[self->state], ircomm_tty_event[event]);
switch (event) {
case IRCOMM_TTY_CONNECT_CONFIRM:
del_timer(&self->watchdog_timer);
ircomm_tty_ias_unregister(self);
/*
* Send initial parameters. This will also send out queued
* parameters waiting for the connection to come up
*/
ircomm_tty_send_initial_parameters(self);
ircomm_tty_link_established(self);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
break;
case IRCOMM_TTY_CONNECT_INDICATION:
del_timer(&self->watchdog_timer);
ircomm_tty_ias_unregister(self);
/* Accept connection */
ircomm_connect_response(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_READY);
break;
case IRCOMM_TTY_WD_TIMER_EXPIRED:
/* Go back to search mode */
ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
ircomm_tty_start_watchdog_timer(self, 3*HZ);
break;
case IRCOMM_TTY_DETACH_CABLE:
/* ircomm_disconnect_request(self->ircomm, NULL); */
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
default:
pr_debug("%s(), unknown event: %s\n", __func__ ,
ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
}
/*
* Function ircomm_tty_state_ready (self, event, skb, info)
*
* IrCOMM is now connected
*
*/
static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
struct sk_buff *skb,
struct ircomm_tty_info *info)
{
int ret = 0;
switch (event) {
case IRCOMM_TTY_DATA_REQUEST:
ret = ircomm_data_request(self->ircomm, skb);
break;
case IRCOMM_TTY_DETACH_CABLE:
ircomm_disconnect_request(self->ircomm, NULL);
ircomm_tty_next_state(self, IRCOMM_TTY_IDLE);
break;
case IRCOMM_TTY_DISCONNECT_INDICATION:
ircomm_tty_ias_register(self);
ircomm_tty_next_state(self, IRCOMM_TTY_SEARCH);
ircomm_tty_start_watchdog_timer(self, 3*HZ);
if (tty_port_check_carrier(&self->port)) {
/* Drop carrier */
self->settings.dce = IRCOMM_DELTA_CD;
ircomm_tty_check_modem_status(self);
} else {
pr_debug("%s(), hanging up!\n", __func__);
tty_port_tty_hangup(&self->port, false);
}
break;
default:
pr_debug("%s(), unknown event: %s\n", __func__ ,
ircomm_tty_event[event]);
ret = -EINVAL;
}
return ret;
}
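/*
 * Summary (added note, derived from the handlers above): the normal client
 * connect sequence walks IDLE -> SEARCH -> QUERY_PARAMETERS ->
 * QUERY_LSAP_SEL -> SETUP -> READY. On watchdog expiry the query and setup
 * states fall back to SEARCH, SEARCH gives up, and IDLE stays put;
 * IRCOMM_TTY_CONNECT_INDICATION moves any pre-READY state straight to READY.
 */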
| gpl-2.0 |
szezso/android_kernel_motorola_msm8916 | fs/hpfs/super.c | 1354 | 19173 | /*
* linux/fs/hpfs/super.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* mounting, unmounting, error handling
*/
#include "hpfs_fn.h"
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
/* Mark the filesystem dirty, so that chkdsk checks it when OS/2 is booted */
static void mark_dirty(struct super_block *s, int remount)
{
if (hpfs_sb(s)->sb_chkdsk && (remount || !(s->s_flags & MS_RDONLY))) {
struct buffer_head *bh;
struct hpfs_spare_block *sb;
if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
sb->dirty = 1;
sb->old_wrote = 0;
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
}
}
}
/* Mark the filesystem clean (mark it dirty for chkdsk if chkdsk==2 or if there
were errors) */
static void unmark_dirty(struct super_block *s)
{
struct buffer_head *bh;
struct hpfs_spare_block *sb;
if (s->s_flags & MS_RDONLY) return;
sync_blockdev(s->s_bdev);
if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error;
sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error;
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
}
}
/* Filesystem error... */
static char err_buf[1024];
void hpfs_error(struct super_block *s, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vsnprintf(err_buf, sizeof(err_buf), fmt, args);
va_end(args);
printk("HPFS: filesystem error: %s", err_buf);
if (!hpfs_sb(s)->sb_was_error) {
if (hpfs_sb(s)->sb_err == 2) {
printk("; crashing the system because you wanted it\n");
mark_dirty(s, 0);
panic("HPFS panic");
} else if (hpfs_sb(s)->sb_err == 1) {
if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n");
else {
printk("; remounting read-only\n");
mark_dirty(s, 0);
s->s_flags |= MS_RDONLY;
}
} else if (s->s_flags & MS_RDONLY) printk("; going on - but nothing will be destroyed because it's read-only\n");
else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
} else printk("\n");
hpfs_sb(s)->sb_was_error = 1;
}
/*
* A little trick to detect cycles in many hpfs structures without letting the
* kernel crash on a corrupted filesystem. When first called, set c2 to 0.
*
* BTW. chkdsk doesn't detect cycles correctly. When I had 2 lost directories
* nested in each other, chkdsk locked up happily.
*/
int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2,
char *msg)
{
if (*c2 && *c1 == key) {
hpfs_error(s, "cycle detected on key %08x in %s", key, msg);
return 1;
}
(*c2)++;
if (!((*c2 - 1) & *c2)) *c1 = key;
return 0;
}
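/*
 * Illustrative usage sketch (added note, not from the original file;
 * next_key() is a placeholder for whatever advances the on-disk chain
 * being walked):
 *
 *	int c1, c2 = 0;
 *	while ((key = next_key(s, key)) != 0)
 *		if (hpfs_stop_cycles(s, key, &c1, &c2, "walk_chain"))
 *			break;
 *
 * c2 must start at zero; c1 is maintained by hpfs_stop_cycles() itself.
 */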
static void hpfs_put_super(struct super_block *s)
{
struct hpfs_sb_info *sbi = hpfs_sb(s);
hpfs_lock(s);
unmark_dirty(s);
hpfs_unlock(s);
kfree(sbi->sb_cp_table);
kfree(sbi->sb_bmp_dir);
s->s_fs_info = NULL;
kfree(sbi);
}
unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
{
struct quad_buffer_head qbh;
unsigned long *bits;
unsigned count;
bits = hpfs_map_4sectors(s, secno, &qbh, 4);
if (!bits)
return 0;
count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
hpfs_brelse4(&qbh);
return count;
}
static unsigned count_bitmaps(struct super_block *s)
{
unsigned n, count, n_bands;
n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
count = 0;
for (n = 0; n < n_bands; n++)
count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
return count;
}
static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *s = dentry->d_sb;
struct hpfs_sb_info *sbi = hpfs_sb(s);
u64 id = huge_encode_dev(s->s_bdev->bd_dev);
hpfs_lock(s);
/*if (sbi->sb_n_free == -1) {*/
sbi->sb_n_free = count_bitmaps(s);
sbi->sb_n_free_dnodes = hpfs_count_one_bitmap(s, sbi->sb_dmap);
/*}*/
buf->f_type = s->s_magic;
buf->f_bsize = 512;
buf->f_blocks = sbi->sb_fs_size;
buf->f_bfree = sbi->sb_n_free;
buf->f_bavail = sbi->sb_n_free;
buf->f_files = sbi->sb_dirband_size / 4;
buf->f_ffree = sbi->sb_n_free_dnodes;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
buf->f_namelen = 254;
hpfs_unlock(s);
return 0;
}
static struct kmem_cache * hpfs_inode_cachep;
static struct inode *hpfs_alloc_inode(struct super_block *sb)
{
struct hpfs_inode_info *ei;
ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
ei->vfs_inode.i_version = 1;
return &ei->vfs_inode;
}
static void hpfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
}
static void hpfs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, hpfs_i_callback);
}
static void init_once(void *foo)
{
struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
{
hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache",
sizeof(struct hpfs_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (hpfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(hpfs_inode_cachep);
}
/*
* A tiny parser for option strings, stolen from dosfs.
* Stolen again from read-only hpfs.
* And updated for table-driven option parsing.
*/
enum {
Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis,
Opt_check_none, Opt_check_normal, Opt_check_strict,
Opt_err_cont, Opt_err_ro, Opt_err_panic,
Opt_eas_no, Opt_eas_ro, Opt_eas_rw,
Opt_chkdsk_no, Opt_chkdsk_errors, Opt_chkdsk_always,
Opt_timeshift, Opt_err,
};
static const match_table_t tokens = {
{Opt_help, "help"},
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_umask, "umask=%o"},
{Opt_case_lower, "case=lower"},
{Opt_case_asis, "case=asis"},
{Opt_check_none, "check=none"},
{Opt_check_normal, "check=normal"},
{Opt_check_strict, "check=strict"},
{Opt_err_cont, "errors=continue"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_err_panic, "errors=panic"},
{Opt_eas_no, "eas=no"},
{Opt_eas_ro, "eas=ro"},
{Opt_eas_rw, "eas=rw"},
{Opt_chkdsk_no, "chkdsk=no"},
{Opt_chkdsk_errors, "chkdsk=errors"},
{Opt_chkdsk_always, "chkdsk=always"},
{Opt_timeshift, "timeshift=%d"},
{Opt_err, NULL},
};
static int parse_opts(char *opts, kuid_t *uid, kgid_t *gid, umode_t *umask,
int *lowercase, int *eas, int *chk, int *errs,
int *chkdsk, int *timeshift)
{
char *p;
int option;
if (!opts)
return 1;
/*printk("Parsing opts: '%s'\n",opts);*/
while ((p = strsep(&opts, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_help:
return 2;
case Opt_uid:
if (match_int(args, &option))
return 0;
*uid = make_kuid(current_user_ns(), option);
if (!uid_valid(*uid))
return 0;
break;
case Opt_gid:
if (match_int(args, &option))
return 0;
*gid = make_kgid(current_user_ns(), option);
if (!gid_valid(*gid))
return 0;
break;
case Opt_umask:
if (match_octal(args, &option))
return 0;
*umask = option;
break;
case Opt_case_lower:
*lowercase = 1;
break;
case Opt_case_asis:
*lowercase = 0;
break;
case Opt_check_none:
*chk = 0;
break;
case Opt_check_normal:
*chk = 1;
break;
case Opt_check_strict:
*chk = 2;
break;
case Opt_err_cont:
*errs = 0;
break;
case Opt_err_ro:
*errs = 1;
break;
case Opt_err_panic:
*errs = 2;
break;
case Opt_eas_no:
*eas = 0;
break;
case Opt_eas_ro:
*eas = 1;
break;
case Opt_eas_rw:
*eas = 2;
break;
case Opt_chkdsk_no:
*chkdsk = 0;
break;
case Opt_chkdsk_errors:
*chkdsk = 1;
break;
case Opt_chkdsk_always:
*chkdsk = 2;
break;
case Opt_timeshift:
{
int m = 1;
char *rhs = args[0].from;
if (!rhs || !*rhs)
return 0;
if (*rhs == '-') m = -1;
if (*rhs == '+' || *rhs == '-') rhs++;
*timeshift = simple_strtoul(rhs, &rhs, 0) * m;
if (*rhs)
return 0;
break;
}
default:
return 0;
}
}
return 1;
}
static inline void hpfs_help(void)
{
printk("\n\
HPFS filesystem options:\n\
help do not mount and display this text\n\
uid=xxx set uid of files that don't have uid specified in eas\n\
gid=xxx set gid of files that don't have gid specified in eas\n\
umask=xxx set mode of files that don't have mode specified in eas\n\
case=lower lowercase all files\n\
case=asis do not lowercase files (default)\n\
check=none no fs checks - kernel may crash on corrupted filesystem\n\
check=normal do some checks - it should not crash (default)\n\
check=strict do extra time-consuming checks, used for debugging\n\
errors=continue continue on errors\n\
errors=remount-ro remount read-only if errors found (default)\n\
errors=panic panic on errors\n\
chkdsk=no do not mark fs for chkdsking even if there were errors\n\
chkdsk=errors mark fs dirty if errors found (default)\n\
chkdsk=always always mark fs dirty - used for debugging\n\
eas=no ignore extended attributes\n\
eas=ro read but do not write extended attributes\n\
eas=rw r/w eas => enables chmod, chown, mknod, ln -s (default)\n\
timeshift=nnn add nnn seconds to file times\n\
\n");
}
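/*
 * Illustrative example (added note): the options listed above are passed as
 * an ordinary mount option string, e.g.
 *
 *	mount -t hpfs -o uid=1000,gid=1000,umask=022,errors=remount-ro \
 *		/dev/sdb1 /mnt/hpfs
 */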
static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
{
kuid_t uid;
kgid_t gid;
umode_t umask;
int lowercase, eas, chk, errs, chkdsk, timeshift;
int o;
struct hpfs_sb_info *sbi = hpfs_sb(s);
char *new_opts = kstrdup(data, GFP_KERNEL);
*flags |= MS_NOATIME;
hpfs_lock(s);
uid = sbi->sb_uid; gid = sbi->sb_gid;
umask = 0777 & ~sbi->sb_mode;
lowercase = sbi->sb_lowercase;
eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk;
errs = sbi->sb_err; timeshift = sbi->sb_timeshift;
if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase,
&eas, &chk, &errs, &chkdsk, ×hift))) {
printk("HPFS: bad mount options.\n");
goto out_err;
}
if (o == 2) {
hpfs_help();
goto out_err;
}
if (timeshift != sbi->sb_timeshift) {
printk("HPFS: timeshift can't be changed using remount.\n");
goto out_err;
}
unmark_dirty(s);
sbi->sb_uid = uid; sbi->sb_gid = gid;
sbi->sb_mode = 0777 & ~umask;
sbi->sb_lowercase = lowercase;
sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
replace_mount_options(s, new_opts);
hpfs_unlock(s);
return 0;
out_err:
hpfs_unlock(s);
kfree(new_opts);
return -EINVAL;
}
/* Super operations */
static const struct super_operations hpfs_sops =
{
.alloc_inode = hpfs_alloc_inode,
.destroy_inode = hpfs_destroy_inode,
.evict_inode = hpfs_evict_inode,
.put_super = hpfs_put_super,
.statfs = hpfs_statfs,
.remount_fs = hpfs_remount_fs,
.show_options = generic_show_options,
};
static int hpfs_fill_super(struct super_block *s, void *options, int silent)
{
struct buffer_head *bh0, *bh1, *bh2;
struct hpfs_boot_block *bootblock;
struct hpfs_super_block *superblock;
struct hpfs_spare_block *spareblock;
struct hpfs_sb_info *sbi;
struct inode *root;
kuid_t uid;
kgid_t gid;
umode_t umask;
int lowercase, eas, chk, errs, chkdsk, timeshift;
dnode_secno root_dno;
struct hpfs_dirent *de = NULL;
struct quad_buffer_head qbh;
int o;
save_mount_options(s, options);
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi) {
return -ENOMEM;
}
s->s_fs_info = sbi;
sbi->sb_bmp_dir = NULL;
sbi->sb_cp_table = NULL;
mutex_init(&sbi->hpfs_mutex);
hpfs_lock(s);
uid = current_uid();
gid = current_gid();
umask = current_umask();
lowercase = 0;
eas = 2;
chk = 1;
errs = 1;
chkdsk = 1;
timeshift = 0;
if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase,
&eas, &chk, &errs, &chkdsk, ×hift))) {
printk("HPFS: bad mount options.\n");
goto bail0;
}
if (o==2) {
hpfs_help();
goto bail0;
}
/*sbi->sb_mounting = 1;*/
sb_set_blocksize(s, 512);
sbi->sb_fs_size = -1;
if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1;
if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2;
if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3;
/* Check magics */
if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC
||*/ le32_to_cpu(superblock->magic) != SB_MAGIC
|| le32_to_cpu(spareblock->magic) != SP_MAGIC) {
if (!silent) printk("HPFS: Bad magic ... probably not HPFS\n");
goto bail4;
}
/* Check version */
if (!(s->s_flags & MS_RDONLY) &&
superblock->funcversion != 2 && superblock->funcversion != 3) {
printk("HPFS: Bad version %d,%d. Mount readonly to go around\n",
(int)superblock->version, (int)superblock->funcversion);
printk("HPFS: please try recent version of HPFS driver at http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi and if it still can't understand this format, contact author - mikulas@artax.karlin.mff.cuni.cz\n");
goto bail4;
}
s->s_flags |= MS_NOATIME;
/* Fill superblock stuff */
s->s_magic = HPFS_SUPER_MAGIC;
s->s_op = &hpfs_sops;
s->s_d_op = &hpfs_dentry_operations;
sbi->sb_root = le32_to_cpu(superblock->root);
sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors);
sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps);
sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start);
sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band);
sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap);
sbi->sb_uid = uid;
sbi->sb_gid = gid;
sbi->sb_mode = 0777 & ~umask;
sbi->sb_n_free = -1;
sbi->sb_n_free_dnodes = -1;
sbi->sb_lowercase = lowercase;
sbi->sb_eas = eas;
sbi->sb_chk = chk;
sbi->sb_chkdsk = chkdsk;
sbi->sb_err = errs;
sbi->sb_timeshift = timeshift;
sbi->sb_was_error = 0;
sbi->sb_cp_table = NULL;
sbi->sb_c_bitmap = -1;
sbi->sb_max_fwd_alloc = 0xffffff;
if (sbi->sb_fs_size >= 0x80000000) {
hpfs_error(s, "invalid size in superblock: %08x",
(unsigned)sbi->sb_fs_size);
goto bail4;
}
/* Load bitmap directory */
if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
goto bail4;
/* Check for general fs errors*/
if (spareblock->dirty && !spareblock->old_wrote) {
if (errs == 2) {
printk("HPFS: Improperly stopped, not mounted\n");
goto bail4;
}
hpfs_error(s, "improperly stopped");
}
if (!(s->s_flags & MS_RDONLY)) {
spareblock->dirty = 1;
spareblock->old_wrote = 0;
mark_buffer_dirty(bh2);
}
if (spareblock->hotfixes_used || spareblock->n_spares_used) {
if (errs >= 2) {
printk("HPFS: Hotfixes not supported here, try chkdsk\n");
mark_dirty(s, 0);
goto bail4;
}
hpfs_error(s, "hotfixes not supported here, try chkdsk");
if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n");
else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n");
}
if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
if (errs >= 2) {
printk("HPFS: Spare dnodes used, try chkdsk\n");
mark_dirty(s, 0);
goto bail4;
}
hpfs_error(s, "warning: spare dnodes used, try chkdsk");
if (errs == 0) printk("HPFS: Proceeding, but your filesystem could be corrupted if you delete files or directories\n");
}
if (chk) {
unsigned a;
if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) ||
le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) {
hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x",
le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band));
goto bail4;
}
a = sbi->sb_dirband_size;
sbi->sb_dirband_size = 0;
if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") ||
hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") ||
hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) {
mark_dirty(s, 0);
goto bail4;
}
sbi->sb_dirband_size = a;
} else printk("HPFS: You really don't want any checks? You are crazy...\n");
/* Load code page table */
if (le32_to_cpu(spareblock->n_code_pages))
if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir))))
printk("HPFS: Warning: code page support is disabled\n");
brelse(bh2);
brelse(bh1);
brelse(bh0);
root = iget_locked(s, sbi->sb_root);
if (!root)
goto bail0;
hpfs_init_inode(root);
hpfs_read_inode(root);
unlock_new_inode(root);
s->s_root = d_make_root(root);
if (!s->s_root)
goto bail0;
/*
* find the root directory's . pointer & finish filling in the inode
*/
root_dno = hpfs_fnode_dno(s, sbi->sb_root);
if (root_dno)
de = map_dirent(root, root_dno, "\001\001", 2, NULL, &qbh);
if (!de)
hpfs_error(s, "unable to find root dir");
else {
root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date));
root->i_atime.tv_nsec = 0;
root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
root->i_mtime.tv_nsec = 0;
root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
root->i_ctime.tv_nsec = 0;
hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
hpfs_i(root)->i_parent_dir = root->i_ino;
if (root->i_size == -1)
root->i_size = 2048;
if (root->i_blocks == -1)
root->i_blocks = 5;
hpfs_brelse4(&qbh);
}
hpfs_unlock(s);
return 0;
bail4: brelse(bh2);
bail3: brelse(bh1);
bail2: brelse(bh0);
bail1:
bail0:
hpfs_unlock(s);
kfree(sbi->sb_bmp_dir);
kfree(sbi->sb_cp_table);
s->s_fs_info = NULL;
kfree(sbi);
return -EINVAL;
}
static struct dentry *hpfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, hpfs_fill_super);
}
static struct file_system_type hpfs_fs_type = {
.owner = THIS_MODULE,
.name = "hpfs",
.mount = hpfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hpfs");
static int __init init_hpfs_fs(void)
{
int err = init_inodecache();
if (err)
goto out1;
err = register_filesystem(&hpfs_fs_type);
if (err)
goto out;
return 0;
out:
destroy_inodecache();
out1:
return err;
}
static void __exit exit_hpfs_fs(void)
{
unregister_filesystem(&hpfs_fs_type);
destroy_inodecache();
}
module_init(init_hpfs_fs)
module_exit(exit_hpfs_fs)
MODULE_LICENSE("GPL");
| gpl-2.0 |
akuster/linux-yocto-dev | drivers/net/wireless/atmel_pci.c | 1866 | 2202 | /*** -*- linux-c -*- **********************************************************
Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
Copyright 2004 Simon Kelley.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Atmel wireless lan drivers; if not, see
<http://www.gnu.org/licenses/>.
******************************************************************************/
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "atmel.h"
MODULE_AUTHOR("Simon Kelley");
MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
static const struct pci_device_id card_ids[] = {
{ 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, card_ids);
static int atmel_pci_probe(struct pci_dev *, const struct pci_device_id *);
static void atmel_pci_remove(struct pci_dev *);
static struct pci_driver atmel_driver = {
.name = "atmel",
.id_table = card_ids,
.probe = atmel_pci_probe,
.remove = atmel_pci_remove,
};
static int atmel_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct net_device *dev;
if (pci_enable_device(pdev))
return -ENODEV;
pci_set_master(pdev);
dev = init_atmel_card(pdev->irq, pdev->resource[1].start,
ATMEL_FW_TYPE_506,
&pdev->dev, NULL, NULL);
if (!dev)
return -ENODEV;
pci_set_drvdata(pdev, dev);
return 0;
}
static void atmel_pci_remove(struct pci_dev *pdev)
{
stop_atmel_card(pci_get_drvdata(pdev));
}
module_pci_driver(atmel_driver);
| gpl-2.0 |
ptmr3/android_kernel_lge_g3 | lib/radix-tree.c | 1866 | 39115 | /*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
* Copyright (C) 2005 SGI, Christoph Lameter
* Copyright (C) 2006 Nick Piggin
* Copyright (C) 2012 Konstantin Khlebnikov
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#ifdef __KERNEL__
#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
#endif
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
#define RADIX_TREE_TAG_LONGS \
((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
struct radix_tree_node {
unsigned int height; /* Height from the bottom */
unsigned int count;
union {
struct radix_tree_node *parent; /* Used when ascending tree */
struct rcu_head rcu_head; /* Used when freeing node */
};
void __rcu *slots[RADIX_TREE_MAP_SIZE];
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
/*
* The height_to_maxindex array needs to be one deeper than the maximum
* path as height 0 holds only 1 entry.
*/
static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
/*
* Radix tree node cache.
*/
static struct kmem_cache *radix_tree_node_cachep;
/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
int nr;
struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
static inline void *ptr_to_indirect(void *ptr)
{
return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
}
static inline void *indirect_to_ptr(void *ptr)
{
return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
}
static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
return root->gfp_mask & __GFP_BITS_MASK;
}
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
int offset)
{
__set_bit(offset, node->tags[tag]);
}
static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
int offset)
{
__clear_bit(offset, node->tags[tag]);
}
static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
int offset)
{
return test_bit(offset, node->tags[tag]);
}
static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
{
root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
root->gfp_mask &= __GFP_BITS_MASK;
}
static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
*/
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
int idx;
for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
if (node->tags[tag][idx])
return 1;
}
return 0;
}
/**
* radix_tree_find_next_bit - find the next set bit in a memory region
*
* @addr: The address to base the search on
* @size: The bitmap size in bits
* @offset: The bitnumber to start searching at
*
* Unrollable variant of find_next_bit() for constant size arrays.
* Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
* Returns next bit offset, or size if nothing found.
*/
static __always_inline unsigned long
radix_tree_find_next_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
if (!__builtin_constant_p(size))
return find_next_bit(addr, size, offset);
if (offset < size) {
unsigned long tmp;
addr += offset / BITS_PER_LONG;
tmp = *addr >> (offset % BITS_PER_LONG);
if (tmp)
return __ffs(tmp) + offset;
offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
while (offset < size) {
tmp = *++addr;
if (tmp)
return __ffs(tmp) + offset;
offset += BITS_PER_LONG;
}
}
return size;
}
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
*/
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
struct radix_tree_node *ret = NULL;
gfp_t gfp_mask = root_gfp_mask(root);
if (!(gfp_mask & __GFP_WAIT)) {
struct radix_tree_preload *rtp;
/*
* Provided the caller has preloaded, we will always
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
rtp = &__get_cpu_var(radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes[rtp->nr - 1];
rtp->nodes[rtp->nr - 1] = NULL;
rtp->nr--;
}
}
if (ret == NULL)
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
BUG_ON(radix_tree_is_indirect_ptr(ret));
return ret;
}
static void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
int i;
/*
* must only free zeroed nodes into the slab. radix_tree_shrink
* can leave us with a non-NULL entry in the first slot, so clear
* that here to make sure.
*/
for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
tag_clear(node, i, 0);
node->slots[0] = NULL;
node->count = 0;
kmem_cache_free(radix_tree_node_cachep, node);
}
static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*
* To make use of this facility, the radix tree must be initialised without
* __GFP_WAIT being passed to INIT_RADIX_TREE().
*/
int radix_tree_preload(gfp_t gfp_mask)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
int ret = -ENOMEM;
preempt_disable();
rtp = &__get_cpu_var(radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
rtp = &__get_cpu_var(radix_tree_preloads);
if (rtp->nr < ARRAY_SIZE(rtp->nodes))
rtp->nodes[rtp->nr++] = node;
else
kmem_cache_free(radix_tree_node_cachep, node);
}
ret = 0;
out:
return ret;
}
EXPORT_SYMBOL(radix_tree_preload);
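/*
 * Illustrative usage sketch (added note; my_tree, my_lock, index and item
 * are caller-provided placeholders): the usual pattern preloads outside the
 * lock and inserts under it:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	error = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */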
/*
* Return the maximum key which can be stored into a
* radix tree with height HEIGHT.
*/
static inline unsigned long radix_tree_maxindex(unsigned int height)
{
return height_to_maxindex[height];
}
/*
* Extend a radix tree so it can store key @index.
*/
static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_node *node;
struct radix_tree_node *slot;
unsigned int height;
int tag;
/* Figure out what the height should be. */
height = root->height + 1;
while (index > radix_tree_maxindex(height))
height++;
if (root->rnode == NULL) {
root->height = height;
goto out;
}
do {
unsigned int newheight;
if (!(node = radix_tree_node_alloc(root)))
return -ENOMEM;
/* Propagate the aggregated tag info into the new root */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
/* Increase the height. */
newheight = root->height+1;
node->height = newheight;
node->count = 1;
node->parent = NULL;
slot = root->rnode;
if (newheight > 1) {
slot = indirect_to_ptr(slot);
slot->parent = node;
}
node->slots[0] = slot;
node = ptr_to_indirect(node);
rcu_assign_pointer(root->rnode, node);
root->height = newheight;
} while (height > root->height);
out:
return 0;
}
/**
* radix_tree_insert - insert into a radix tree
* @root: radix tree root
* @index: index key
* @item: item to insert
*
* Insert an item into the radix tree at position @index.
*/
int radix_tree_insert(struct radix_tree_root *root,
unsigned long index, void *item)
{
struct radix_tree_node *node = NULL, *slot;
unsigned int height, shift;
int offset;
int error;
BUG_ON(radix_tree_is_indirect_ptr(item));
/* Make sure the tree is high enough. */
if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
if (error)
return error;
}
slot = indirect_to_ptr(root->rnode);
height = root->height;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
offset = 0; /* uninitialised var warning */
while (height > 0) {
if (slot == NULL) {
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
return -ENOMEM;
slot->height = height;
slot->parent = node;
if (node) {
rcu_assign_pointer(node->slots[offset], slot);
node->count++;
} else
rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
}
/* Go a level down */
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
node = slot;
slot = node->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
if (slot != NULL)
return -EEXIST;
if (node) {
node->count++;
rcu_assign_pointer(node->slots[offset], item);
BUG_ON(tag_get(node, 0, offset));
BUG_ON(tag_get(node, 1, offset));
} else {
rcu_assign_pointer(root->rnode, item);
BUG_ON(root_tag_get(root, 0));
BUG_ON(root_tag_get(root, 1));
}
return 0;
}
EXPORT_SYMBOL(radix_tree_insert);
/*
* is_slot == 1 : search for the slot.
* is_slot == 0 : search for the node.
*/
static void *radix_tree_lookup_element(struct radix_tree_root *root,
unsigned long index, int is_slot)
{
unsigned int height, shift;
struct radix_tree_node *node, **slot;
node = rcu_dereference_raw(root->rnode);
if (node == NULL)
return NULL;
if (!radix_tree_is_indirect_ptr(node)) {
if (index > 0)
return NULL;
return is_slot ? (void *)&root->rnode : node;
}
node = indirect_to_ptr(node);
height = node->height;
if (index > radix_tree_maxindex(height))
return NULL;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
do {
slot = (struct radix_tree_node **)
(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
node = rcu_dereference_raw(*slot);
if (node == NULL)
return NULL;
shift -= RADIX_TREE_MAP_SHIFT;
height--;
} while (height > 0);
return is_slot ? (void *)slot : indirect_to_ptr(node);
}
/**
* radix_tree_lookup_slot - lookup a slot in a radix tree
* @root: radix tree root
* @index: index key
*
* Returns: the slot corresponding to the position @index in the
* radix tree @root. This is useful for update-if-exists operations.
*
* This function can be called under rcu_read_lock iff the slot is not
* modified by radix_tree_replace_slot, otherwise it must be called
* exclusive from other writers. Any dereference of the slot must be done
* using radix_tree_deref_slot.
*/
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
return (void **)radix_tree_lookup_element(root, index, 1);
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
/**
* radix_tree_lookup - perform lookup operation on a radix tree
* @root: radix tree root
* @index: index key
*
* Lookup the item at the position @index in the radix tree @root.
*
* This function can be called under rcu_read_lock, however the caller
* must manage lifetimes of leaf nodes (eg. RCU may also be used to free
* them safely). No RCU barriers are required to access or modify the
* returned item, however.
*/
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
return radix_tree_lookup_element(root, index, 0);
}
EXPORT_SYMBOL(radix_tree_lookup);
/**
* radix_tree_tag_set - set a tag on a radix tree node
* @root: radix tree root
* @index: index key
* @tag: tag index
*
* Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
* corresponding to @index in the radix tree. From
* the root all the way down to the leaf node.
*
* Returns the address of the tagged item. Setting a tag on a not-present
* item is a bug.
*/
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
unsigned int height, shift;
struct radix_tree_node *slot;
height = root->height;
BUG_ON(index > radix_tree_maxindex(height));
slot = indirect_to_ptr(root->rnode);
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
while (height > 0) {
int offset;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
if (!tag_get(slot, tag, offset))
tag_set(slot, tag, offset);
slot = slot->slots[offset];
BUG_ON(slot == NULL);
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
/* set the root's tag bit */
if (slot && !root_tag_get(root, tag))
root_tag_set(root, tag);
return slot;
}
EXPORT_SYMBOL(radix_tree_tag_set);
/**
* radix_tree_tag_clear - clear a tag on a radix tree node
* @root: radix tree root
* @index: index key
* @tag: tag index
*
* Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
* corresponding to @index in the radix tree. If
* this causes the leaf node to have no tags set then clear the tag in the
* next-to-leaf node, etc.
*
* Returns the address of the tagged item on success, else NULL. ie:
* has the same return value and semantics as radix_tree_lookup().
*/
void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_node *node = NULL;
struct radix_tree_node *slot = NULL;
unsigned int height, shift;
int uninitialized_var(offset);
height = root->height;
if (index > radix_tree_maxindex(height))
goto out;
shift = height * RADIX_TREE_MAP_SHIFT;
slot = indirect_to_ptr(root->rnode);
while (shift) {
if (slot == NULL)
goto out;
shift -= RADIX_TREE_MAP_SHIFT;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
node = slot;
slot = slot->slots[offset];
}
if (slot == NULL)
goto out;
while (node) {
if (!tag_get(node, tag, offset))
goto out;
tag_clear(node, tag, offset);
if (any_tag_set(node, tag))
goto out;
index >>= RADIX_TREE_MAP_SHIFT;
offset = index & RADIX_TREE_MAP_MASK;
node = node->parent;
}
/* clear the root's tag bit */
if (root_tag_get(root, tag))
root_tag_clear(root, tag);
out:
return slot;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
/**
* radix_tree_tag_get - get a tag on a radix tree node
* @root: radix tree root
* @index: index key
* @tag: tag index (< RADIX_TREE_MAX_TAGS)
*
* Return values:
*
* 0: tag not present or not set
* 1: tag set
*
* Note that the return value of this function may not be relied on, even if
* the RCU lock is held, unless tag modification and node deletion are excluded
* from concurrency.
*/
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
unsigned int height, shift;
struct radix_tree_node *node;
/* check the root's tag bit */
if (!root_tag_get(root, tag))
return 0;
node = rcu_dereference_raw(root->rnode);
if (node == NULL)
return 0;
if (!radix_tree_is_indirect_ptr(node))
return (index == 0);
node = indirect_to_ptr(node);
height = node->height;
if (index > radix_tree_maxindex(height))
return 0;
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
for ( ; ; ) {
int offset;
if (node == NULL)
return 0;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
if (!tag_get(node, tag, offset))
return 0;
if (height == 1)
return 1;
node = rcu_dereference_raw(node->slots[offset]);
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
}
EXPORT_SYMBOL(radix_tree_tag_get);
/**
* radix_tree_next_chunk - find next chunk of slots for iteration
*
* @root: radix tree root
* @iter: iterator state
* @flags: RADIX_TREE_ITER_* flags and tag index
* Returns: pointer to chunk first slot, or NULL if iteration is over
*/
void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags)
{
unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
struct radix_tree_node *rnode, *node;
unsigned long index, offset;
if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
return NULL;
/*
* Catch next_index overflow after ~0UL. iter->index never overflows
* during iteration; it can be zero only at the beginning.
* And we cannot overflow iter->next_index in a single step,
* because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
*
* This condition is also used by radix_tree_next_slot() to stop
* contiguous iteration, and to forbid switching to the next chunk.
*/
index = iter->next_index;
if (!index && iter->index)
return NULL;
rnode = rcu_dereference_raw(root->rnode);
if (radix_tree_is_indirect_ptr(rnode)) {
rnode = indirect_to_ptr(rnode);
} else if (rnode && !index) {
/* Single-slot tree */
iter->index = 0;
iter->next_index = 1;
iter->tags = 1;
return (void **)&root->rnode;
} else
return NULL;
restart:
shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT;
offset = index >> shift;
/* Index outside of the tree */
if (offset >= RADIX_TREE_MAP_SIZE)
return NULL;
node = rnode;
while (1) {
if ((flags & RADIX_TREE_ITER_TAGGED) ?
!test_bit(offset, node->tags[tag]) :
!node->slots[offset]) {
/* Hole detected */
if (flags & RADIX_TREE_ITER_CONTIG)
return NULL;
if (flags & RADIX_TREE_ITER_TAGGED)
offset = radix_tree_find_next_bit(
node->tags[tag],
RADIX_TREE_MAP_SIZE,
offset + 1);
else
while (++offset < RADIX_TREE_MAP_SIZE) {
if (node->slots[offset])
break;
}
index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1);
index += offset << shift;
/* Overflow after ~0UL */
if (!index)
return NULL;
if (offset == RADIX_TREE_MAP_SIZE)
goto restart;
}
/* This is leaf-node */
if (!shift)
break;
node = rcu_dereference_raw(node->slots[offset]);
if (node == NULL)
goto restart;
shift -= RADIX_TREE_MAP_SHIFT;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
}
/* Update the iterator state */
iter->index = index;
iter->next_index = (index | RADIX_TREE_MAP_MASK) + 1;
/* Construct iter->tags bit-mask from node->tags[tag] array */
if (flags & RADIX_TREE_ITER_TAGGED) {
unsigned tag_long, tag_bit;
tag_long = offset / BITS_PER_LONG;
tag_bit = offset % BITS_PER_LONG;
iter->tags = node->tags[tag][tag_long] >> tag_bit;
/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
/* Pick tags from next element */
if (tag_bit)
iter->tags |= node->tags[tag][tag_long + 1] <<
(BITS_PER_LONG - tag_bit);
/* Clip chunk size, here only BITS_PER_LONG tags */
iter->next_index = index + BITS_PER_LONG;
}
}
return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
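/*
 * Illustrative usage sketch (added note; my_tree and process() are
 * placeholder names): radix_tree_next_chunk() is normally driven through
 * the iterator macros, e.g.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		process(iter.index, rcu_dereference_raw(*slot));
 */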
/**
* radix_tree_range_tag_if_tagged - for each item in given range set given
* tag if item has another tag set
* @root: radix tree root
* @first_indexp: pointer to a starting index of a range to scan
* @last_index: last index of a range to scan
* @nr_to_tag: maximum number items to tag
* @iftag: tag index to test
* @settag: tag index to set if tested tag is set
*
* This function scans range of radix tree from first_index to last_index
* (inclusive). For each item in the range if iftag is set, the function sets
* also settag. The function stops either after tagging nr_to_tag items or
* after reaching last_index.
*
* The tags must be set from the leaf level only and propagated back up the
* path to the root. We must do this so that we resolve the full path before
* setting any tags on intermediate nodes. If we set tags as we descend, then
* we can get to the leaf node and find that the index that has the iftag
* set is outside the range we are scanning. This results in dangling tags and
* can lead to problems with later tag operations (e.g. livelocks on lookups).
*
* The function returns number of leaves where the tag was set and sets
* *first_indexp to the first unscanned index.
* WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
* be prepared to handle that.
*/
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
unsigned long *first_indexp, unsigned long last_index,
unsigned long nr_to_tag,
unsigned int iftag, unsigned int settag)
{
unsigned int height = root->height;
struct radix_tree_node *node = NULL;
struct radix_tree_node *slot;
unsigned int shift;
unsigned long tagged = 0;
unsigned long index = *first_indexp;
last_index = min(last_index, radix_tree_maxindex(height));
if (index > last_index)
return 0;
if (!nr_to_tag)
return 0;
if (!root_tag_get(root, iftag)) {
*first_indexp = last_index + 1;
return 0;
}
if (height == 0) {
*first_indexp = last_index + 1;
root_tag_set(root, settag);
return 1;
}
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = indirect_to_ptr(root->rnode);
for (;;) {
unsigned long upindex;
int offset;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
if (!slot->slots[offset])
goto next;
if (!tag_get(slot, iftag, offset))
goto next;
if (shift) {
/* Go down one level */
shift -= RADIX_TREE_MAP_SHIFT;
node = slot;
slot = slot->slots[offset];
continue;
}
/* tag the leaf */
tagged++;
tag_set(slot, settag, offset);
/* walk back up the path tagging interior nodes */
upindex = index;
while (node) {
upindex >>= RADIX_TREE_MAP_SHIFT;
offset = upindex & RADIX_TREE_MAP_MASK;
/* stop if we find a node with the tag already set */
if (tag_get(node, settag, offset))
break;
tag_set(node, settag, offset);
node = node->parent;
}
/*
* Small optimization: now clear that node pointer.
* Since all of this slot's ancestors now have the tag set
* from setting it above, we have no further need to walk
* back up the tree setting tags, until we update slot to
* point to another radix_tree_node.
*/
node = NULL;
next:
/* Go to next item at level determined by 'shift' */
index = ((index >> shift) + 1) << shift;
/* Overflow can happen when last_index is ~0UL... */
if (index > last_index || !index)
break;
if (tagged >= nr_to_tag)
break;
while (((index >> shift) & RADIX_TREE_MAP_MASK) == 0) {
/*
* We've fully scanned this node. Go up. Because
* last_index is guaranteed to be in the tree, what
* we do below cannot wander astray.
*/
slot = slot->parent;
shift += RADIX_TREE_MAP_SHIFT;
}
}
/*
* There is no need to set the root tag if nothing within the range
* from *first_indexp to last_index was tagged with settag.
*/
if (tagged > 0)
root_tag_set(root, settag);
*first_indexp = index;
return tagged;
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
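/*
 * Illustrative usage sketch (added note; my_tree, start, end and BATCH are
 * caller-provided placeholders): callers typically re-tag a range in bounded
 * batches, for example re-tagging DIRTY entries as TOWRITE:
 *
 *	while (start <= end) {
 *		tagged = radix_tree_range_tag_if_tagged(&my_tree, &start, end,
 *				BATCH, PAGECACHE_TAG_DIRTY,
 *				PAGECACHE_TAG_TOWRITE);
 *		if (tagged < BATCH)
 *			break;
 *		cond_resched();
 *	}
 */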
/**
* radix_tree_next_hole - find the next hole (not-present entry)
* @root: tree root
* @index: index key
* @max_scan: maximum range to search
*
* Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
* indexed hole.
*
* Returns: the index of the hole if found, otherwise returns an index
* outside of the set specified (in which case 'return - index >= max_scan'
* will be true). In rare cases of index wrap-around, 0 will be returned.
*
* radix_tree_next_hole may be called under rcu_read_lock. However, like
* radix_tree_gang_lookup, this will not atomically search a snapshot of
* the tree at a single point in time. For example, if a hole is created
* at index 5, then subsequently a hole is created at index 10,
* radix_tree_next_hole covering both indexes may return 10 if called
* under rcu_read_lock.
*/
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan)
{
unsigned long i;
for (i = 0; i < max_scan; i++) {
if (!radix_tree_lookup(root, index))
break;
index++;
if (index == 0)
break;
}
return index;
}
EXPORT_SYMBOL(radix_tree_next_hole);
/**
* radix_tree_prev_hole - find the prev hole (not-present entry)
* @root: tree root
* @index: index key
* @max_scan: maximum range to search
*
* Search backwards in the range [max(index-max_scan+1, 0), index]
* for the first hole.
*
* Returns: the index of the hole if found, otherwise returns an index
* outside of the set specified (in which case 'index - return >= max_scan'
* will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
*
* radix_tree_prev_hole may be called under rcu_read_lock. However, like
* radix_tree_gang_lookup, this will not atomically search a snapshot of
* the tree at a single point in time. For example, if a hole is created
* at index 10, then subsequently a hole is created at index 5,
* radix_tree_prev_hole covering both indexes may return 5 if called under
* rcu_read_lock.
*/
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan)
{
unsigned long i;
for (i = 0; i < max_scan; i++) {
if (!radix_tree_lookup(root, index))
break;
index--;
if (index == ULONG_MAX)
break;
}
return index;
}
EXPORT_SYMBOL(radix_tree_prev_hole);
/**
* radix_tree_gang_lookup - perform multiple lookup on a radix tree
* @root: radix tree root
* @results: where the results of the lookup are placed
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
*
* Performs an index-ascending scan of the tree for present items. Places
* them at *@results and returns the number of items which were placed at
* *@results.
*
* The implementation is naive.
*
* Like radix_tree_lookup, radix_tree_gang_lookup may be called under
* rcu_read_lock. In this case, rather than the returned results being
* an atomic snapshot of the tree at a single point in time, the semantics
* of an RCU protected gang lookup are as though multiple radix_tree_lookups
* have been issued in individual locks, and results stored in 'results'.
*/
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items)
{
struct radix_tree_iter iter;
void **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_slot(slot, root, &iter, first_index) {
results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
if (!results[ret])
continue;
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
/**
* radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
* @root: radix tree root
* @results: where the results of the lookup are placed
* @indices: where their indices should be placed (but usually NULL)
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
*
* Performs an index-ascending scan of the tree for present items. Places
* their slots at *@results and returns the number of items which were
* placed at *@results.
*
* The implementation is naive.
*
* Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
* be dereferenced with radix_tree_deref_slot, and if using only RCU
* protection, radix_tree_deref_slot may fail requiring a retry.
*/
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items)
{
struct radix_tree_iter iter;
void **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_slot(slot, root, &iter, first_index) {
results[ret] = slot;
if (indices)
indices[ret] = iter.index;
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
/**
* radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
* based on a tag
* @root: radix tree root
* @results: where the results of the lookup are placed
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
* @tag: the tag index (< RADIX_TREE_MAX_TAGS)
*
* Performs an index-ascending scan of the tree for present items which
* have the tag indexed by @tag set. Places the items at *@results and
* returns the number of items which were placed at *@results.
*/
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items,
unsigned int tag)
{
struct radix_tree_iter iter;
void **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
if (!results[ret])
continue;
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
* radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
* radix tree based on a tag
* @root: radix tree root
* @results: where the results of the lookup are placed
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
* @tag: the tag index (< RADIX_TREE_MAX_TAGS)
*
* Performs an index-ascending scan of the tree for present items which
* have the tag indexed by @tag set. Places the slots at *@results and
* returns the number of slots which were placed at *@results.
*/
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
unsigned long first_index, unsigned int max_items,
unsigned int tag)
{
struct radix_tree_iter iter;
void **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
results[ret] = slot;
if (++ret == max_items)
break;
}
return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
#include <linux/sched.h> /* for cond_resched() */
/*
* This linear search is at present only useful to shmem_unuse_inode().
*/
static unsigned long __locate(struct radix_tree_node *slot, void *item,
unsigned long index, unsigned long *found_index)
{
unsigned int shift, height;
unsigned long i;
height = slot->height;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
for ( ; height > 1; height--) {
i = (index >> shift) & RADIX_TREE_MAP_MASK;
for (;;) {
if (slot->slots[i] != NULL)
break;
index &= ~((1UL << shift) - 1);
index += 1UL << shift;
if (index == 0)
goto out; /* 32-bit wraparound */
i++;
if (i == RADIX_TREE_MAP_SIZE)
goto out;
}
shift -= RADIX_TREE_MAP_SHIFT;
slot = rcu_dereference_raw(slot->slots[i]);
if (slot == NULL)
goto out;
}
/* Bottom level: check items */
for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
if (slot->slots[i] == item) {
*found_index = index + i;
index = 0;
goto out;
}
}
index += RADIX_TREE_MAP_SIZE;
out:
return index;
}
/**
* radix_tree_locate_item - search through radix tree for item
* @root: radix tree root
* @item: item to be found
*
* Returns index where item was found, or -1 if not found.
* Caller must hold no lock (since this time-consuming function needs
* to be preemptible), and must check afterwards if item is still there.
*/
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
struct radix_tree_node *node;
unsigned long max_index;
unsigned long cur_index = 0;
unsigned long found_index = -1;
do {
rcu_read_lock();
node = rcu_dereference_raw(root->rnode);
if (!radix_tree_is_indirect_ptr(node)) {
rcu_read_unlock();
if (node == item)
found_index = 0;
break;
}
node = indirect_to_ptr(node);
max_index = radix_tree_maxindex(node->height);
if (cur_index > max_index)
break;
cur_index = __locate(node, item, cur_index, &found_index);
rcu_read_unlock();
cond_resched();
} while (cur_index != 0 && cur_index <= max_index);
return found_index;
}
#else
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
return -1;
}
#endif /* CONFIG_SHMEM && CONFIG_SWAP */
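/*
 * Illustrative sketch (not part of the original source): how a caller such as
 * shmem_unuse_inode() is expected to use radix_tree_locate_item() -- without
 * holding any lock, and re-checking under the tree lock afterwards because
 * the item may have moved or been deleted meanwhile. The function name and
 * the spinlock parameter are hypothetical stand-ins for the caller's own
 * locking.
 */
static bool __maybe_unused example_locate_and_recheck(
		struct radix_tree_root *root, spinlock_t *tree_lock, void *item)
{
	unsigned long index;
	bool still_there = false;

	index = radix_tree_locate_item(root, item);
	if (index == (unsigned long)-1)
		return false;

	spin_lock_irq(tree_lock);
	if (radix_tree_lookup(root, index) == item)
		still_there = true;
	spin_unlock_irq(tree_lock);

	return still_there;
}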
/**
* radix_tree_shrink - shrink height of a radix tree to minimal
 * @root:	radix tree root
*/
static inline void radix_tree_shrink(struct radix_tree_root *root)
{
/* try to shrink tree height */
while (root->height > 0) {
struct radix_tree_node *to_free = root->rnode;
struct radix_tree_node *slot;
BUG_ON(!radix_tree_is_indirect_ptr(to_free));
to_free = indirect_to_ptr(to_free);
/*
 * If the candidate node has more than one child, or its only
 * child is not at the leftmost slot, we cannot shrink.
*/
if (to_free->count != 1)
break;
if (!to_free->slots[0])
break;
/*
* We don't need rcu_assign_pointer(), since we are simply
* moving the node from one part of the tree to another: if it
* was safe to dereference the old pointer to it
* (to_free->slots[0]), it will be safe to dereference the new
* one (root->rnode) as far as dependent read barriers go.
*/
slot = to_free->slots[0];
if (root->height > 1) {
slot->parent = NULL;
slot = ptr_to_indirect(slot);
}
root->rnode = slot;
root->height--;
/*
* We have a dilemma here. The node's slot[0] must not be
* NULLed in case there are concurrent lookups expecting to
* find the item. However if this was a bottom-level node,
* then it may be subject to the slot pointer being visible
* to callers dereferencing it. If item corresponding to
* slot[0] is subsequently deleted, these callers would expect
* their slot to become empty sooner or later.
*
* For example, lockless pagecache will look up a slot, deref
* the page pointer, and if the page is 0 refcount it means it
* was concurrently deleted from pagecache so try the deref
* again. Fortunately there is already a requirement for logic
* to retry the entire slot lookup -- the indirect pointer
* problem (replacing direct root node with an indirect pointer
* also results in a stale slot). So tag the slot as indirect
* to force callers to retry.
*/
if (root->height == 0)
*((unsigned long *)&to_free->slots[0]) |=
RADIX_TREE_INDIRECT_PTR;
radix_tree_node_free(to_free);
}
}
/**
* radix_tree_delete - delete an item from a radix tree
* @root: radix tree root
* @index: index key
*
* Remove the item at @index from the radix tree rooted at @root.
*
* Returns the address of the deleted item, or NULL if it was not present.
*/
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_node *node = NULL;
struct radix_tree_node *slot = NULL;
struct radix_tree_node *to_free;
unsigned int height, shift;
int tag;
int uninitialized_var(offset);
height = root->height;
if (index > radix_tree_maxindex(height))
goto out;
slot = root->rnode;
if (height == 0) {
root_tag_clear_all(root);
root->rnode = NULL;
goto out;
}
slot = indirect_to_ptr(slot);
shift = height * RADIX_TREE_MAP_SHIFT;
do {
if (slot == NULL)
goto out;
shift -= RADIX_TREE_MAP_SHIFT;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
node = slot;
slot = slot->slots[offset];
} while (shift);
if (slot == NULL)
goto out;
/*
* Clear all tags associated with the item to be deleted.
 * Doing it this way is inefficient, but tags are seldom set.
*/
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
if (tag_get(node, tag, offset))
radix_tree_tag_clear(root, index, tag);
}
to_free = NULL;
/* Now free the nodes we do not need anymore */
while (node) {
node->slots[offset] = NULL;
node->count--;
/*
* Queue the node for deferred freeing after the
* last reference to it disappears (set NULL, above).
*/
if (to_free)
radix_tree_node_free(to_free);
if (node->count) {
if (node == indirect_to_ptr(root->rnode))
radix_tree_shrink(root);
goto out;
}
/* Node with zero slots in use so free it */
to_free = node;
index >>= RADIX_TREE_MAP_SHIFT;
offset = index & RADIX_TREE_MAP_MASK;
node = node->parent;
}
root_tag_clear_all(root);
root->height = 0;
root->rnode = NULL;
if (to_free)
radix_tree_node_free(to_free);
out:
return slot;
}
EXPORT_SYMBOL(radix_tree_delete);
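/*
 * Illustrative sketch (not part of the original source): the usual
 * preload/insert/delete cycle a caller goes through, with updates serialized
 * by the caller's own lock (a hypothetical spinlock here).
 */
static int __maybe_unused example_insert_then_delete(
		struct radix_tree_root *root, spinlock_t *lock,
		unsigned long index, void *item)
{
	int err;

	/* preallocate nodes while still allowed to sleep */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(lock);
	err = radix_tree_insert(root, index, item);
	spin_unlock(lock);
	radix_tree_preload_end();
	if (err)
		return err;

	spin_lock(lock);
	/* returns the item, or NULL if someone else removed it first */
	item = radix_tree_delete(root, index);
	spin_unlock(lock);

	return 0;
}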
/**
* radix_tree_tagged - test whether any items in the tree are tagged
* @root: radix tree root
* @tag: tag to test
*/
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
static void
radix_tree_node_ctor(void *node)
{
memset(node, 0, sizeof(struct radix_tree_node));
}
static __init unsigned long __maxindex(unsigned int height)
{
unsigned int width = height * RADIX_TREE_MAP_SHIFT;
int shift = RADIX_TREE_INDEX_BITS - width;
if (shift < 0)
return ~0UL;
if (shift >= BITS_PER_LONG)
return 0UL;
return ~0UL >> shift;
}
static __init void radix_tree_init_maxindex(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
height_to_maxindex[i] = __maxindex(i);
}
static int radix_tree_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
int cpu = (long)hcpu;
struct radix_tree_preload *rtp;
/* Free per-cpu pool of preloaded nodes */
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
rtp = &per_cpu(radix_tree_preloads, cpu);
while (rtp->nr) {
kmem_cache_free(radix_tree_node_cachep,
rtp->nodes[rtp->nr-1]);
rtp->nodes[rtp->nr-1] = NULL;
rtp->nr--;
}
}
return NOTIFY_OK;
}
void __init radix_tree_init(void)
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
radix_tree_node_ctor);
radix_tree_init_maxindex();
hotcpu_notifier(radix_tree_callback, 0);
}
| gpl-2.0 |
bigzz/linaro-aarch64 | drivers/net/wireless/brcm80211/brcmsmac/main.c | 2122 | 220018 | /*
* Copyright (c) 2010 Broadcom Corporation
* Copyright (c) 2013 Hauke Mehrtens <hauke@hauke-m.de>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci_ids.h>
#include <linux/if_ether.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <brcm_hw_ids.h>
#include <aiutils.h>
#include <chipcommon.h>
#include "rate.h"
#include "scb.h"
#include "phy/phy_hal.h"
#include "channel.h"
#include "antsel.h"
#include "stf.h"
#include "ampdu.h"
#include "mac80211_if.h"
#include "ucode_loader.h"
#include "main.h"
#include "soc.h"
#include "dma.h"
#include "debug.h"
#include "brcms_trace_events.h"
/* watchdog timer, in unit of ms */
#define TIMER_INTERVAL_WATCHDOG 1000
/* radio monitor timer, in unit of ms */
#define TIMER_INTERVAL_RADIOCHK 800
/* beacon interval, in unit of 1024TU */
#define BEACON_INTERVAL_DEFAULT 100
/* n-mode support capability */
/* 2x2 includes both 1x1 & 2x2 devices
* reserved #define 2 for future when we want to separate 1x1 & 2x2 and
* control it independently
*/
#define WL_11N_2x2 1
#define WL_11N_3x3 3
#define WL_11N_4x4 4
#define EDCF_ACI_MASK 0x60
#define EDCF_ACI_SHIFT 5
#define EDCF_ECWMIN_MASK 0x0f
#define EDCF_ECWMAX_SHIFT 4
#define EDCF_AIFSN_MASK 0x0f
#define EDCF_AIFSN_MAX 15
#define EDCF_ECWMAX_MASK 0xf0
#define EDCF_AC_BE_TXOP_STA 0x0000
#define EDCF_AC_BK_TXOP_STA 0x0000
#define EDCF_AC_VO_ACI_STA 0x62
#define EDCF_AC_VO_ECW_STA 0x32
#define EDCF_AC_VI_ACI_STA 0x42
#define EDCF_AC_VI_ECW_STA 0x43
#define EDCF_AC_BK_ECW_STA 0xA4
#define EDCF_AC_VI_TXOP_STA 0x005e
#define EDCF_AC_VO_TXOP_STA 0x002f
#define EDCF_AC_BE_ACI_STA 0x03
#define EDCF_AC_BE_ECW_STA 0xA4
#define EDCF_AC_BK_ACI_STA 0x27
#define EDCF_AC_VO_TXOP_AP 0x002f
#define EDCF_TXOP2USEC(txop) ((txop) << 5)
#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
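/*
 * Worked example (illustrative, not part of the original source): the ECW
 * byte carries ECWmin in the low nibble and ECWmax in the high nibble. For
 * EDCF_AC_VI_ECW_STA (0x43): ECWmin = 3, so CWmin = EDCF_ECW2CW(3) = 7 slots,
 * and ECWmax = 4, so CWmax = EDCF_ECW2CW(4) = 15 slots. A TXOP value such as
 * EDCF_AC_VI_TXOP_STA (0x005e) converts to EDCF_TXOP2USEC(0x5e) = 94 << 5 =
 * 3008 us.
 */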
#define APHY_SYMBOL_TIME 4
#define APHY_PREAMBLE_TIME 16
#define APHY_SIGNAL_TIME 4
#define APHY_SIFS_TIME 16
#define APHY_SERVICE_NBITS 16
#define APHY_TAIL_NBITS 6
#define BPHY_SIFS_TIME 10
#define BPHY_PLCP_SHORT_TIME 96
#define PREN_PREAMBLE 24
#define PREN_MM_EXT 12
#define PREN_PREAMBLE_EXT 4
#define DOT11_MAC_HDR_LEN 24
#define DOT11_ACK_LEN 10
#define DOT11_BA_LEN 4
#define DOT11_OFDM_SIGNAL_EXTENSION 6
#define DOT11_MIN_FRAG_LEN 256
#define DOT11_RTS_LEN 16
#define DOT11_CTS_LEN 10
#define DOT11_BA_BITMAP_LEN 128
#define DOT11_MAXNUMFRAGS 16
#define DOT11_MAX_FRAG_LEN 2346
#define BPHY_PLCP_TIME 192
#define RIFS_11N_TIME 2
/* length of the BCN template area */
#define BCN_TMPL_LEN 512
/* brcms_bss_info flag bit values */
#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
/* chip rx buffer offset */
#define BRCMS_HWRXOFF 38
/* rfdisable delay timer 500 ms, runs of ALP clock */
#define RFDISABLE_DEFAULT 10000000
#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
/* synthpu_dly times in us */
#define SYNTHPU_DLY_APHY_US 3700
#define SYNTHPU_DLY_BPHY_US 1050
#define SYNTHPU_DLY_NPHY_US 2048
#define SYNTHPU_DLY_LPPHY_US 300
#define ANTCNT 10 /* vanilla M_MAX_ANTCNT val */
/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
#define EDCF_SHORT_S 0
#define EDCF_SFB_S 4
#define EDCF_LONG_S 8
#define EDCF_LFB_S 12
#define EDCF_SHORT_M BITFIELD_MASK(4)
#define EDCF_SFB_M BITFIELD_MASK(4)
#define EDCF_LONG_M BITFIELD_MASK(4)
#define EDCF_LFB_M BITFIELD_MASK(4)
#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
#define RETRY_LONG_DEF 4 /* Default Long retry count */
#define RETRY_SHORT_FB 3 /* Short count for fb rate */
#define RETRY_LONG_FB 2 /* Long count for fb rate */
#define APHY_CWMIN 15
#define PHY_CWMAX 1023
#define EDCF_AIFSN_MIN 1
#define FRAGNUM_MASK 0xF
#define APHY_SLOT_TIME 9
#define BPHY_SLOT_TIME 20
#define WL_SPURAVOID_OFF 0
#define WL_SPURAVOID_ON1 1
#define WL_SPURAVOID_ON2 2
/* invalid core flags, use the saved coreflags */
#define BRCMS_USE_COREFLAGS 0xffffffff
/* values for PLCPHdr_override */
#define BRCMS_PLCP_AUTO -1
#define BRCMS_PLCP_SHORT 0
#define BRCMS_PLCP_LONG 1
/* values for g_protection_override and n_protection_override */
#define BRCMS_PROTECTION_AUTO -1
#define BRCMS_PROTECTION_OFF 0
#define BRCMS_PROTECTION_ON 1
#define BRCMS_PROTECTION_MMHDR_ONLY 2
#define BRCMS_PROTECTION_CTS_ONLY 3
/* values for g_protection_control and n_protection_control */
#define BRCMS_PROTECTION_CTL_OFF 0
#define BRCMS_PROTECTION_CTL_LOCAL 1
#define BRCMS_PROTECTION_CTL_OVERLAP 2
/* values for n_protection */
#define BRCMS_N_PROTECTION_OFF 0
#define BRCMS_N_PROTECTION_OPTIONAL 1
#define BRCMS_N_PROTECTION_20IN40 2
#define BRCMS_N_PROTECTION_MIXEDMODE 3
/* values for band specific 40MHz capabilities */
#define BRCMS_N_BW_20ALL 0
#define BRCMS_N_BW_40ALL 1
#define BRCMS_N_BW_20IN2G_40IN5G 2
/* bitflags for SGI support (sgi_rx iovar) */
#define BRCMS_N_SGI_20 0x01
#define BRCMS_N_SGI_40 0x02
/* defines used by the nrate iovar */
/* MCS in use, indicates b0-6 holds an mcs */
#define NRATE_MCS_INUSE 0x00000080
/* rate/mcs value */
#define NRATE_RATE_MASK 0x0000007f
/* stf mode mask: siso, cdd, stbc, sdm */
#define NRATE_STF_MASK 0x0000ff00
/* stf mode shift */
#define NRATE_STF_SHIFT 8
/* bit indicate to override mcs only */
#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
#define NRATE_SGI_SHIFT 23 /* sgi mode */
#define NRATE_LDPC_CODING 0x00400000 /* adv coding in use */
#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
#define NRATE_STF_SISO 0 /* stf mode SISO */
#define NRATE_STF_CDD 1 /* stf mode CDD */
#define NRATE_STF_STBC 2 /* stf mode STBC */
#define NRATE_STF_SDM 3 /* stf mode SDM */
#define MAX_DMA_SEGS 4
/* # of entries in Tx FIFO */
#define NTXD 64
/* Max # of entries in Rx FIFO based on 4kb page size */
#define NRXD 256
/* Amount of headroom to leave in Tx FIFO */
#define TX_HEADROOM 4
/* try to keep this # rbufs posted to the chip */
#define NRXBUFPOST 32
/* max # frames to process in brcms_c_recv() */
#define RXBND 8
/* max # tx status to process in wlc_txstatus() */
#define TXSBND 8
/* brcmu_format_flags() bit description structure */
struct brcms_c_bit_desc {
u32 bit;
const char *name;
};
/*
* The following table lists the buffer memory allocated to xmt fifos in HW.
* the size is in units of 256bytes(one block), total size is HW dependent
* ucode has default fifo partition, sw can overwrite if necessary
*
* This is documented in twiki under the topic UcodeTxFifo. Please ensure
* the twiki is updated before making changes.
*/
/* Starting corerev for the fifo size table */
#define XMTFIFOTBL_STARTREV 17
struct d11init {
__le16 addr;
__le16 size;
__le32 value;
};
struct edcf_acparam {
u8 ACI;
u8 ECW;
u16 TXOP;
} __packed;
/* debug/trace */
uint brcm_msg_level;
/* TX FIFO number to WME/802.1E Access Category */
static const u8 wme_fifo2ac[] = {
IEEE80211_AC_BK,
IEEE80211_AC_BE,
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_BE,
IEEE80211_AC_BE
};
/* ieee80211 Access Category to TX FIFO number */
static const u8 wme_ac2fifo[] = {
TX_AC_VO_FIFO,
TX_AC_VI_FIFO,
TX_AC_BE_FIFO,
TX_AC_BK_FIFO
};
static const u16 xmtfifo_sz[][NFIFO] = {
/* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 18: */
{0, 0, 0, 0, 0, 0},
/* corerev 19: */
{0, 0, 0, 0, 0, 0},
/* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */
{9, 58, 22, 14, 14, 5},
/* corerev 22: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 23: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
{9, 58, 22, 14, 14, 5},
/* corerev 25: */
{0, 0, 0, 0, 0, 0},
/* corerev 26: */
{0, 0, 0, 0, 0, 0},
/* corerev 27: */
{0, 0, 0, 0, 0, 0},
/* corerev 28: 2304, 14848, 5632, 3584, 3584, 1280 */
{9, 58, 22, 14, 14, 5},
};
#ifdef DEBUG
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
#else
static const char fifo_names[6][0];
#endif
#ifdef DEBUG
/* pointer to most recently allocated wl/wlc */
static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
#endif
/* Mapping of ieee80211 AC numbers to tx fifos */
static const u8 ac_to_fifo_mapping[IEEE80211_NUM_ACS] = {
[IEEE80211_AC_VO] = TX_AC_VO_FIFO,
[IEEE80211_AC_VI] = TX_AC_VI_FIFO,
[IEEE80211_AC_BE] = TX_AC_BE_FIFO,
[IEEE80211_AC_BK] = TX_AC_BK_FIFO,
};
/* Mapping of tx fifos to ieee80211 AC numbers */
static const u8 fifo_to_ac_mapping[IEEE80211_NUM_ACS] = {
[TX_AC_BK_FIFO] = IEEE80211_AC_BK,
[TX_AC_BE_FIFO] = IEEE80211_AC_BE,
[TX_AC_VI_FIFO] = IEEE80211_AC_VI,
[TX_AC_VO_FIFO] = IEEE80211_AC_VO,
};
static u8 brcms_ac_to_fifo(u8 ac)
{
if (ac >= ARRAY_SIZE(ac_to_fifo_mapping))
return TX_AC_BE_FIFO;
return ac_to_fifo_mapping[ac];
}
static u8 brcms_fifo_to_ac(u8 fifo)
{
if (fifo >= ARRAY_SIZE(fifo_to_ac_mapping))
return IEEE80211_AC_BE;
return fifo_to_ac_mapping[fifo];
}
/* Find basic rate for a given rate */
static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
{
if (is_mcs_rate(rspec))
return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
.leg_ofdm];
return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
}
static u16 frametype(u32 rspec, u8 mimoframe)
{
if (is_mcs_rate(rspec))
return mimoframe;
return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
}
/* currently the best mechanism for determining SIFS is the band in use */
static u16 get_sifs(struct brcms_band *band)
{
return band->bandtype == BRCM_BAND_5G ? APHY_SIFS_TIME :
BPHY_SIFS_TIME;
}
/*
* Detect Card removed.
* Even checking an sbconfig register read will not false trigger when the core
 * is in reset, but it breaks the CF address mechanism. Accessing gphy phyversion will
* cause SB error if aphy is in reset on 4306B0-DB. Need a simple accessible
* reg with fixed 0/1 pattern (some platforms return all 0).
* If clocks are present, call the sb routine which will figure out if the
* device is removed.
*/
static bool brcms_deviceremoved(struct brcms_c_info *wlc)
{
u32 macctrl;
if (!wlc->hw->clk)
return ai_deviceremoved(wlc->hw->sih);
macctrl = bcma_read32(wlc->hw->d11core,
D11REGOFFS(maccontrol));
return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
}
/* sum the individual fifo tx pending packet counts */
static int brcms_txpktpendtot(struct brcms_c_info *wlc)
{
int i;
int pending = 0;
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
pending += dma_txpending(wlc->hw->di[i]);
return pending;
}
static bool brcms_is_mband_unlocked(struct brcms_c_info *wlc)
{
return wlc->pub->_nbands > 1 && !wlc->bandlocked;
}
static int brcms_chspec_bw(u16 chanspec)
{
if (CHSPEC_IS40(chanspec))
return BRCMS_40_MHZ;
if (CHSPEC_IS20(chanspec))
return BRCMS_20_MHZ;
return BRCMS_10_MHZ;
}
static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
{
if (cfg == NULL)
return;
kfree(cfg->current_bss);
kfree(cfg);
}
static void brcms_c_detach_mfree(struct brcms_c_info *wlc)
{
if (wlc == NULL)
return;
brcms_c_bsscfg_mfree(wlc->bsscfg);
kfree(wlc->pub);
kfree(wlc->modulecb);
kfree(wlc->default_bss);
kfree(wlc->protection);
kfree(wlc->stf);
kfree(wlc->bandstate[0]);
kfree(wlc->corestate->macstat_snapshot);
kfree(wlc->corestate);
kfree(wlc->hw->bandstate[0]);
kfree(wlc->hw);
if (wlc->beacon)
dev_kfree_skb_any(wlc->beacon);
if (wlc->probe_resp)
dev_kfree_skb_any(wlc->probe_resp);
/* free the wlc */
kfree(wlc);
wlc = NULL;
}
static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
{
struct brcms_bss_cfg *cfg;
cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC);
if (cfg == NULL)
goto fail;
cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
if (cfg->current_bss == NULL)
goto fail;
return cfg;
fail:
brcms_c_bsscfg_mfree(cfg);
return NULL;
}
static struct brcms_c_info *
brcms_c_attach_malloc(uint unit, uint *err, uint devid)
{
struct brcms_c_info *wlc;
wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC);
if (wlc == NULL) {
*err = 1002;
goto fail;
}
/* allocate struct brcms_c_pub state structure */
wlc->pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC);
if (wlc->pub == NULL) {
*err = 1003;
goto fail;
}
wlc->pub->wlc = wlc;
/* allocate struct brcms_hardware state structure */
wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC);
if (wlc->hw == NULL) {
*err = 1005;
goto fail;
}
wlc->hw->wlc = wlc;
wlc->hw->bandstate[0] =
kzalloc(sizeof(struct brcms_hw_band) * MAXBANDS, GFP_ATOMIC);
if (wlc->hw->bandstate[0] == NULL) {
*err = 1006;
goto fail;
} else {
int i;
for (i = 1; i < MAXBANDS; i++)
wlc->hw->bandstate[i] = (struct brcms_hw_band *)
((unsigned long)wlc->hw->bandstate[0] +
(sizeof(struct brcms_hw_band) * i));
}
wlc->modulecb =
kzalloc(sizeof(struct modulecb) * BRCMS_MAXMODULES, GFP_ATOMIC);
if (wlc->modulecb == NULL) {
*err = 1009;
goto fail;
}
wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
if (wlc->default_bss == NULL) {
*err = 1010;
goto fail;
}
wlc->bsscfg = brcms_c_bsscfg_malloc(unit);
if (wlc->bsscfg == NULL) {
*err = 1011;
goto fail;
}
wlc->protection = kzalloc(sizeof(struct brcms_protection),
GFP_ATOMIC);
if (wlc->protection == NULL) {
*err = 1016;
goto fail;
}
wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC);
if (wlc->stf == NULL) {
*err = 1017;
goto fail;
}
wlc->bandstate[0] =
kzalloc(sizeof(struct brcms_band)*MAXBANDS, GFP_ATOMIC);
if (wlc->bandstate[0] == NULL) {
*err = 1025;
goto fail;
} else {
int i;
for (i = 1; i < MAXBANDS; i++)
wlc->bandstate[i] = (struct brcms_band *)
((unsigned long)wlc->bandstate[0]
+ (sizeof(struct brcms_band)*i));
}
wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC);
if (wlc->corestate == NULL) {
*err = 1026;
goto fail;
}
wlc->corestate->macstat_snapshot =
kzalloc(sizeof(struct macstat), GFP_ATOMIC);
if (wlc->corestate->macstat_snapshot == NULL) {
*err = 1027;
goto fail;
}
return wlc;
fail:
brcms_c_detach_mfree(wlc);
return NULL;
}
/*
* Update the slot timing for standard 11b/g (20us slots)
* or shortslot 11g (9us slots)
* The PSM needs to be suspended for this call.
*/
static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
bool shortslot)
{
struct bcma_device *core = wlc_hw->d11core;
if (shortslot) {
/* 11g short slot: 11a timing */
bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
} else {
/* 11g long slot: 11b timing */
bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
}
}
/*
* calculate frame duration of a given rate and length, return
* time in usec unit
*/
static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
u8 preamble_type, uint mac_len)
{
uint nsyms, dur = 0, Ndps, kNdps;
uint rate = rspec2rate(ratespec);
if (rate == 0) {
brcms_err(wlc->hw->d11core, "wl%d: WAR: using rate of 1 mbps\n",
wlc->pub->unit);
rate = BRCM_RATE_1M;
}
if (is_mcs_rate(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
dur = PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
if (preamble_type == BRCMS_MM_PREAMBLE)
dur += PREN_MM_EXT;
/* 1000Ndbps = kbps * 4 */
kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
rspec_issgi(ratespec)) * 4;
if (rspec_stc(ratespec) == 0)
nsyms =
CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
APHY_TAIL_NBITS) * 1000, kNdps);
else
/* STBC needs to have even number of symbols */
nsyms =
2 *
CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
APHY_TAIL_NBITS) * 1000, 2 * kNdps);
dur += APHY_SYMBOL_TIME * nsyms;
if (wlc->band->bandtype == BRCM_BAND_2G)
dur += DOT11_OFDM_SIGNAL_EXTENSION;
} else if (is_ofdm_rate(rate)) {
dur = APHY_PREAMBLE_TIME;
dur += APHY_SIGNAL_TIME;
/* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */
Ndps = rate * 2;
/* NSyms = CEILING((SERVICE + 8*NBytes + TAIL) / Ndbps) */
nsyms =
CEIL((APHY_SERVICE_NBITS + 8 * mac_len + APHY_TAIL_NBITS),
Ndps);
dur += APHY_SYMBOL_TIME * nsyms;
if (wlc->band->bandtype == BRCM_BAND_2G)
dur += DOT11_OFDM_SIGNAL_EXTENSION;
} else {
/*
* calc # bits * 2 so factor of 2 in rate (1/2 mbps)
* will divide out
*/
mac_len = mac_len * 8 * 2;
/* calc ceiling of bits/rate = microseconds of air time */
dur = (mac_len + rate - 1) / rate;
if (preamble_type & BRCMS_SHORT_PREAMBLE)
dur += BPHY_PLCP_SHORT_TIME;
else
dur += BPHY_PLCP_TIME;
}
return dur;
}
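/*
 * Worked example (illustrative, not part of the original source): for a
 * legacy OFDM rate of 24 Mbps (rate field 48 in 500 kbps units) and a
 * 100-byte MPDU, Ndps = 48 * 2 = 96 bits/symbol and
 * nsyms = CEIL(16 + 800 + 6, 96) = 9, so dur = 16 + 4 + 4 * 9 = 56 us,
 * plus the 6 us OFDM signal extension on 2.4 GHz for a total of 62 us.
 */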
static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
const struct d11init *inits)
{
struct bcma_device *core = wlc_hw->d11core;
int i;
uint offset;
u16 size;
u32 value;
brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
size = le16_to_cpu(inits[i].size);
offset = le16_to_cpu(inits[i].addr);
value = le32_to_cpu(inits[i].value);
if (size == 2)
bcma_write16(core, offset, value);
else if (size == 4)
bcma_write32(core, offset, value);
else
break;
}
}
static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs)
{
u8 idx;
u16 addr[] = {
M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4,
M_HOST_FLAGS5
};
for (idx = 0; idx < MHFMAX; idx++)
brcms_b_write_shm(wlc_hw, addr[idx], mhfs[idx]);
}
static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
{
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
/* init microcode host flags */
brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs);
/* do band-specific ucode IHR, SHM, and SCR inits */
if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
brcms_c_write_inits(wlc_hw, ucode->d11n0bsinitvals16);
else
brcms_err(wlc_hw->d11core,
"%s: wl%d: unsupported phy in corerev %d\n",
__func__, wlc_hw->unit,
wlc_hw->corerev);
} else {
if (D11REV_IS(wlc_hw->corerev, 24)) {
if (BRCMS_ISLCNPHY(wlc_hw->band))
brcms_c_write_inits(wlc_hw,
ucode->d11lcn0bsinitvals24);
else
brcms_err(wlc_hw->d11core,
"%s: wl%d: unsupported phy in core rev %d\n",
__func__, wlc_hw->unit,
wlc_hw->corerev);
} else {
brcms_err(wlc_hw->d11core,
"%s: wl%d: unsupported corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
}
}
}
static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
{
struct bcma_device *core = wlc_hw->d11core;
u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
}
static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
{
brcms_dbg_info(wlc_hw->d11core, "wl%d: clk %d\n", wlc_hw->unit, clk);
wlc_hw->phyclk = clk;
if (OFF == clk) { /* clear gmode bit, put phy into reset */
brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
(SICF_PRST | SICF_FGC));
udelay(1);
brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
udelay(1);
} else { /* take phy out of reset */
brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
udelay(1);
brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
udelay(1);
}
}
/* low-level band switch utility routine */
static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
{
brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
bandunit);
wlc_hw->band = wlc_hw->bandstate[bandunit];
/*
* BMAC_NOTE:
* until we eliminate need for wlc->band refs in low level code
*/
wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
/* set gmode core flag */
if (wlc_hw->sbclk && !wlc_hw->noreset) {
u32 gmode = 0;
if (bandunit == 0)
gmode = SICF_GMODE;
brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
}
}
/* switch to new band but leave it inactive */
static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
u32 macctrl;
brcms_dbg_mac80211(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
macctrl = bcma_read32(wlc_hw->d11core,
D11REGOFFS(maccontrol));
WARN_ON((macctrl & MCTL_EN_MAC) != 0);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
/* radio off */
wlc_phy_switch_radio(wlc_hw->band->pi, OFF);
brcms_b_core_phy_clk(wlc_hw, OFF);
brcms_c_setxband(wlc_hw, bandunit);
return macintmask;
}
/* process an individual struct tx_status */
static bool
brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
{
struct sk_buff *p = NULL;
uint queue = NFIFO;
struct dma_pub *dma = NULL;
struct d11txh *txh = NULL;
struct scb *scb = NULL;
bool free_pdu;
int tx_rts, tx_frame_count, tx_rts_count;
uint totlen, supr_status;
bool lastframe;
struct ieee80211_hdr *h;
u16 mcl;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *txrate;
int i;
bool fatal = true;
trace_brcms_txstatus(&wlc->hw->d11core->dev, txs->framelen,
txs->frameid, txs->status, txs->lasttxtime,
txs->sequence, txs->phyerr, txs->ackphyrxsh);
/* discard intermediate indications for ucode with one legitimate case:
* e.g. if "useRTS" is set. ucode did a successful rts/cts exchange,
* but the subsequent tx of DATA failed. so it will start rts/cts
* from the beginning (resetting the rts transmission count)
*/
if (!(txs->status & TX_STATUS_AMPDU)
&& (txs->status & TX_STATUS_INTERMEDIATE)) {
brcms_dbg_tx(wlc->hw->d11core, "INTERMEDIATE but not AMPDU\n");
fatal = false;
goto out;
}
queue = txs->frameid & TXFID_QUEUE_MASK;
if (queue >= NFIFO) {
brcms_err(wlc->hw->d11core, "queue %u >= NFIFO\n", queue);
goto out;
}
dma = wlc->hw->di[queue];
p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
if (p == NULL) {
brcms_err(wlc->hw->d11core, "dma_getnexttxp returned null!\n");
goto out;
}
txh = (struct d11txh *) (p->data);
mcl = le16_to_cpu(txh->MacTxControlLow);
if (txs->phyerr)
brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
txs->phyerr, txh->MainRates);
if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
goto out;
}
tx_info = IEEE80211_SKB_CB(p);
h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
if (tx_info->rate_driver_data[0])
scb = &wlc->pri_scb;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
fatal = false;
goto out;
}
/*
* brcms_c_ampdu_dotxstatus() will trace tx descriptors for AMPDU
* frames; this traces them for the rest.
*/
trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
supr_status = txs->status & TX_STATUS_SUPR_MASK;
if (supr_status == TX_STATUS_SUPR_BADCH) {
unsigned xfts = le16_to_cpu(txh->XtraFrameTypes);
brcms_dbg_tx(wlc->hw->d11core,
"Pkt tx suppressed, dest chan %u, current %d\n",
(xfts >> XFTS_CHANNEL_SHIFT) & 0xff,
CHSPEC_CHANNEL(wlc->default_bss->chanspec));
}
tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
tx_frame_count =
(txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT;
tx_rts_count =
(txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT;
lastframe = !ieee80211_has_morefrags(h->frame_control);
if (!lastframe) {
brcms_err(wlc->hw->d11core, "Not last frame!\n");
} else {
/*
* Set information to be consumed by Minstrel ht.
*
* The "fallback limit" is the number of tx attempts a given
* MPDU is sent at the "primary" rate. Tx attempts beyond that
* limit are sent at the "secondary" rate.
 * A 'short frame' does not exceed the RTS threshold.
*/
u16 sfbl, /* Short Frame Rate Fallback Limit */
lfbl, /* Long Frame Rate Fallback Limit */
fbl;
if (queue < IEEE80211_NUM_ACS) {
sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
EDCF_SFB);
lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
EDCF_LFB);
} else {
sfbl = wlc->SFBL;
lfbl = wlc->LFBL;
}
txrate = tx_info->status.rates;
if (txrate[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
fbl = lfbl;
else
fbl = sfbl;
ieee80211_tx_info_clear_status(tx_info);
if ((tx_frame_count > fbl) && (txrate[1].idx >= 0)) {
/*
* rate selection requested a fallback rate
* and we used it
*/
txrate[0].count = fbl;
txrate[1].count = tx_frame_count - fbl;
} else {
/*
* rate selection did not request fallback rate, or
* we didn't need it
*/
txrate[0].count = tx_frame_count;
/*
* rc80211_minstrel.c:minstrel_tx_status() expects
* unused rates to be marked with idx = -1
*/
txrate[1].idx = -1;
txrate[1].count = 0;
}
/* clear the rest of the rates */
for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
txrate[i].idx = -1;
txrate[i].count = 0;
}
if (txs->status & TX_STATUS_ACK_RCV)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
totlen = p->len;
free_pdu = true;
if (lastframe) {
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
} else {
brcms_err(wlc->hw->d11core,
"%s: Not last frame => not calling tx_status\n",
__func__);
}
fatal = false;
out:
if (fatal) {
if (txh)
trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
sizeof(*txh));
if (p)
brcmu_pkt_buf_free_skb(p);
}
if (dma && queue < NFIFO) {
u16 ac_queue = brcms_fifo_to_ac(queue);
if (dma->txavail > TX_HEADROOM && queue < TX_BCMC_FIFO &&
ieee80211_queue_stopped(wlc->pub->ieee_hw, ac_queue))
ieee80211_wake_queue(wlc->pub->ieee_hw, ac_queue);
dma_kick_tx(dma);
}
return fatal;
}
/* process tx completion events in BMAC
* Return true if more tx status need to be processed. false otherwise.
*/
static bool
brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
uint n = 0;
/*
* Param 'max_tx_num' indicates max. # tx status to process before
* break out.
*/
uint max_tx_num = bound ? TXSBND : -1;
txs = &txstatus;
core = wlc_hw->d11core;
*fatal = false;
while (n < max_tx_num) {
s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (s1 == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
*fatal = true;
return false;
}
/* only process when valid */
if (!(s1 & TXS_V))
break;
s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
txs->sequence = s2 & TXS_SEQ_MASK;
txs->phyerr = (s2 & TXS_PTX_MASK) >> TXS_PTX_SHIFT;
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
if (*fatal == true)
return false;
n++;
}
return n >= max_tx_num;
}
static void brcms_c_tbtt(struct brcms_c_info *wlc)
{
if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
/*
* DirFrmQ is now valid...defer setting until end
* of ATIM window
*/
wlc->qvalid |= MCMD_DIRFRMQVAL;
}
/* set initial host flags value */
static void
brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
{
struct brcms_hardware *wlc_hw = wlc->hw;
memset(mhfs, 0, MHFMAX * sizeof(u16));
mhfs[MHF2] |= mhf2_init;
/* prohibit use of slowclock on multifunction boards */
if (wlc_hw->boardflags & BFL_NOPLLDOWN)
mhfs[MHF1] |= MHF1_FORCEFASTCLK;
if (BRCMS_ISNPHY(wlc_hw->band) && NREV_LT(wlc_hw->band->phyrev, 2)) {
mhfs[MHF2] |= MHF2_NPHY40MHZ_WAR;
mhfs[MHF1] |= MHF1_IQSWAP_WAR;
}
}
static uint
dmareg(uint direction, uint fifonum)
{
if (direction == DMA_TX)
return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
}
static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
{
uint i;
char name[8];
/*
* ucode host flag 2 needed for pio mode, independent of band and fifo
*/
u16 pio_mhf2 = 0;
struct brcms_hardware *wlc_hw = wlc->hw;
uint unit = wlc_hw->unit;
/* name and offsets for dma_attach */
snprintf(name, sizeof(name), "wl%d", unit);
if (wlc_hw->di[0] == NULL) { /* Init FIFOs */
int dma_attach_err = 0;
/*
* FIFO 0
* TX: TX_AC_BK_FIFO (TX AC Background data packets)
* RX: RX_FIFO (RX data packets)
*/
wlc_hw->di[0] = dma_attach(name, wlc,
(wme ? dmareg(DMA_TX, 0) : 0),
dmareg(DMA_RX, 0),
(wme ? NTXD : 0), NRXD,
RXBUFSZ, -1, NRXBUFPOST,
BRCMS_HWRXOFF);
dma_attach_err |= (NULL == wlc_hw->di[0]);
/*
* FIFO 1
* TX: TX_AC_BE_FIFO (TX AC Best-Effort data packets)
* (legacy) TX_DATA_FIFO (TX data packets)
* RX: UNUSED
*/
wlc_hw->di[1] = dma_attach(name, wlc,
dmareg(DMA_TX, 1), 0,
NTXD, 0, 0, -1, 0, 0);
dma_attach_err |= (NULL == wlc_hw->di[1]);
/*
* FIFO 2
* TX: TX_AC_VI_FIFO (TX AC Video data packets)
* RX: UNUSED
*/
wlc_hw->di[2] = dma_attach(name, wlc,
dmareg(DMA_TX, 2), 0,
NTXD, 0, 0, -1, 0, 0);
dma_attach_err |= (NULL == wlc_hw->di[2]);
/*
* FIFO 3
* TX: TX_AC_VO_FIFO (TX AC Voice data packets)
* (legacy) TX_CTL_FIFO (TX control & mgmt packets)
*/
wlc_hw->di[3] = dma_attach(name, wlc,
dmareg(DMA_TX, 3),
0, NTXD, 0, 0, -1,
0, 0);
dma_attach_err |= (NULL == wlc_hw->di[3]);
/* Cleaner to leave this as if with AP defined */
if (dma_attach_err) {
brcms_err(wlc_hw->d11core,
"wl%d: wlc_attach: dma_attach failed\n",
unit);
return false;
}
/* get pointer to dma engine tx flow control variable */
for (i = 0; i < NFIFO; i++)
if (wlc_hw->di[i])
wlc_hw->txavail[i] =
(uint *) dma_getvar(wlc_hw->di[i],
"&txavail");
}
/* initial ucode host flags */
brcms_c_mhfdef(wlc, wlc_hw->band->mhfs, pio_mhf2);
return true;
}
static void brcms_b_detach_dmapio(struct brcms_hardware *wlc_hw)
{
uint j;
for (j = 0; j < NFIFO; j++) {
if (wlc_hw->di[j]) {
dma_detach(wlc_hw->di[j]);
wlc_hw->di[j] = NULL;
}
}
}
/*
* Initialize brcms_c_info default values ...
* may get overrides later in this function
* BMAC_NOTES, move low out and resolve the dangling ones
*/
static void brcms_b_info_init(struct brcms_hardware *wlc_hw)
{
struct brcms_c_info *wlc = wlc_hw->wlc;
/* set default sw macintmask value */
wlc->defmacintmask = DEF_MACINTMASK;
/* various 802.11g modes */
wlc_hw->shortslot = false;
wlc_hw->SFBL = RETRY_SHORT_FB;
wlc_hw->LFBL = RETRY_LONG_FB;
/* default mac retry limits */
wlc_hw->SRL = RETRY_SHORT_DEF;
wlc_hw->LRL = RETRY_LONG_DEF;
wlc_hw->chanspec = ch20mhz_chspec(1);
}
static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
{
/* delay before first read of ucode state */
udelay(40);
/* wait until ucode is no longer asleep */
SPINWAIT((brcms_b_read_shm(wlc_hw, M_UCODE_DBGST) ==
DBGST_ASLEEP), wlc_hw->wlc->fastpwrup_dly);
}
/* control chip clock to save power, enable dynamic clock or force fast clock */
static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, enum bcma_clkmode mode)
{
if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
* on backplane, but mac core will still run on ALP(not HT) when
* it enters powersave mode, which means the FCA bit may not be
* set. Should wakeup mac if driver wants it to run on HT.
*/
if (wlc_hw->clk) {
if (mode == BCMA_CLKMODE_FAST) {
bcma_set32(wlc_hw->d11core,
D11REGOFFS(clk_ctl_st),
CCS_FORCEHT);
udelay(64);
SPINWAIT(
((bcma_read32(wlc_hw->d11core,
D11REGOFFS(clk_ctl_st)) &
CCS_HTAVAIL) == 0),
PMU_MAX_TRANSITION_DLY);
WARN_ON(!(bcma_read32(wlc_hw->d11core,
D11REGOFFS(clk_ctl_st)) &
CCS_HTAVAIL));
} else {
if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
(bcma_read32(wlc_hw->d11core,
D11REGOFFS(clk_ctl_st)) &
(CCS_FORCEHT | CCS_HTAREQ)))
SPINWAIT(
((bcma_read32(wlc_hw->d11core,
offsetof(struct d11regs,
clk_ctl_st)) &
CCS_HTAVAIL) == 0),
PMU_MAX_TRANSITION_DLY);
bcma_mask32(wlc_hw->d11core,
D11REGOFFS(clk_ctl_st),
~CCS_FORCEHT);
}
}
wlc_hw->forcefastclk = (mode == BCMA_CLKMODE_FAST);
} else {
/* old chips w/o PMU, force HT through cc,
* then use FCA to verify mac is running fast clock
*/
wlc_hw->forcefastclk = ai_clkctl_cc(wlc_hw->sih, mode);
/* check fast clock is available (if core is not in reset) */
if (wlc_hw->forcefastclk && wlc_hw->clk)
WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
SISF_FCLKA));
/*
* keep the ucode wake bit on if forcefastclk is on since we
* do not want ucode to put us back to slow clock when it dozes
* for PM mode. Code below matches the wake override bit with
* current forcefastclk state. Only setting bit in wake_override
* instead of waking ucode immediately since old code had this
* behavior. Older code set wlc->forcefastclk but only had the
* wake happen if the wakup_ucode work (protected by an up
* check) was executed just below.
*/
if (wlc_hw->forcefastclk)
mboolset(wlc_hw->wake_override,
BRCMS_WAKE_OVERRIDE_FORCEFAST);
else
mboolclr(wlc_hw->wake_override,
BRCMS_WAKE_OVERRIDE_FORCEFAST);
}
}
/* set or clear ucode host flag bits
* it has an optimization for no-change write
* it only writes through shared memory when the core has clock;
* pre-CLK changes should use wlc_write_mhf to get around the optimization
*
*
* bands values are: BRCM_BAND_AUTO <--- Current band only
* BRCM_BAND_5G <--- 5G band only
* BRCM_BAND_2G <--- 2G band only
* BRCM_BAND_ALL <--- All bands
*/
void
brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
int bands)
{
u16 save;
u16 addr[MHFMAX] = {
M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4,
M_HOST_FLAGS5
};
struct brcms_hw_band *band;
if ((val & ~mask) || idx >= MHFMAX)
return; /* error condition */
switch (bands) {
/* Current band only or all bands,
* then set the band to current band
*/
case BRCM_BAND_AUTO:
case BRCM_BAND_ALL:
band = wlc_hw->band;
break;
case BRCM_BAND_5G:
band = wlc_hw->bandstate[BAND_5G_INDEX];
break;
case BRCM_BAND_2G:
band = wlc_hw->bandstate[BAND_2G_INDEX];
break;
default:
band = NULL; /* error condition */
}
if (band) {
save = band->mhfs[idx];
band->mhfs[idx] = (band->mhfs[idx] & ~mask) | val;
/* optimization: only write through if changed, and
* changed band is the current band
*/
if (wlc_hw->clk && (band->mhfs[idx] != save)
&& (band == wlc_hw->band))
brcms_b_write_shm(wlc_hw, addr[idx],
(u16) band->mhfs[idx]);
}
if (bands == BRCM_BAND_ALL) {
wlc_hw->bandstate[0]->mhfs[idx] =
(wlc_hw->bandstate[0]->mhfs[idx] & ~mask) | val;
wlc_hw->bandstate[1]->mhfs[idx] =
(wlc_hw->bandstate[1]->mhfs[idx] & ~mask) | val;
}
}
/* set the maccontrol register to desired reset state and
* initialize the sw cache of the register
*/
static void brcms_c_mctrl_reset(struct brcms_hardware *wlc_hw)
{
/* IHR accesses are always enabled, PSM disabled, HPS off and WAKE on */
wlc_hw->maccontrol = 0;
wlc_hw->suspended_fifos = 0;
wlc_hw->wake_override = 0;
wlc_hw->mute_override = 0;
brcms_b_mctrl(wlc_hw, ~0, MCTL_IHR_EN | MCTL_WAKE);
}
/*
* write the software state of maccontrol and
* overrides to the maccontrol register
*/
static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
{
u32 maccontrol = wlc_hw->maccontrol;
/* OR in the wake bit if overridden */
if (wlc_hw->wake_override)
maccontrol |= MCTL_WAKE;
/* set AP and INFRA bits for mute if needed */
if (wlc_hw->mute_override) {
maccontrol &= ~(MCTL_AP);
maccontrol |= MCTL_INFRA;
}
bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
maccontrol);
}
/* set or clear maccontrol bits */
void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val)
{
u32 maccontrol;
u32 new_maccontrol;
if (val & ~mask)
return; /* error condition */
maccontrol = wlc_hw->maccontrol;
new_maccontrol = (maccontrol & ~mask) | val;
/* if the new maccontrol value is the same as the old, nothing to do */
if (new_maccontrol == maccontrol)
return;
/* something changed, cache the new value */
wlc_hw->maccontrol = new_maccontrol;
/* write the new values with overrides applied */
brcms_c_mctrl_write(wlc_hw);
}
void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
u32 override_bit)
{
if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE)) {
mboolset(wlc_hw->wake_override, override_bit);
return;
}
mboolset(wlc_hw->wake_override, override_bit);
brcms_c_mctrl_write(wlc_hw);
brcms_b_wait_for_wake(wlc_hw);
}
void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
u32 override_bit)
{
mboolclr(wlc_hw->wake_override, override_bit);
if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE))
return;
brcms_c_mctrl_write(wlc_hw);
}
/* When driver needs ucode to stop beaconing, it has to make sure that
* MCTL_AP is clear and MCTL_INFRA is set
* Mode MCTL_AP MCTL_INFRA
* AP 1 1
* STA 0 1 <--- This will ensure no beacons
* IBSS 0 0
*/
static void brcms_c_ucode_mute_override_set(struct brcms_hardware *wlc_hw)
{
wlc_hw->mute_override = 1;
/* if maccontrol already has AP == 0 and INFRA == 1 without this
* override, then there is no change to write
*/
if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
return;
brcms_c_mctrl_write(wlc_hw);
}
/* Clear the override on AP and INFRA bits */
static void brcms_c_ucode_mute_override_clear(struct brcms_hardware *wlc_hw)
{
if (wlc_hw->mute_override == 0)
return;
wlc_hw->mute_override = 0;
/* if maccontrol already has AP == 0 and INFRA == 1 without this
* override, then there is no change to write
*/
if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
return;
brcms_c_mctrl_write(wlc_hw);
}
/*
* Write a MAC address to the given match reg offset in the RXE match engine.
*/
static void
brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
const u8 *addr)
{
struct bcma_device *core = wlc_hw->d11core;
u16 mac_l;
u16 mac_m;
u16 mac_h;
brcms_dbg_rx(core, "wl%d: brcms_b_set_addrmatch\n", wlc_hw->unit);
mac_l = addr[0] | (addr[1] << 8);
mac_m = addr[2] | (addr[3] << 8);
mac_h = addr[4] | (addr[5] << 8);
/* enter the MAC addr into the RXE match registers */
bcma_write16(core, D11REGOFFS(rcm_ctl),
RCM_INC_DATA | match_reg_offset);
bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
}
void
brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
void *buf)
{
struct bcma_device *core = wlc_hw->d11core;
u32 word;
__le32 word_le;
__be32 word_be;
bool be_bit;
brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
/* if MCTL_BIGEND bit set in mac control register,
* the chip swaps data in fifo, as well as data in
* template ram
*/
be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
while (len > 0) {
memcpy(&word, buf, sizeof(u32));
if (be_bit) {
word_be = cpu_to_be32(word);
word = *(u32 *)&word_be;
} else {
word_le = cpu_to_le32(word);
word = *(u32 *)&word_le;
}
bcma_write32(core, D11REGOFFS(tplatewrdata), word);
buf = (u8 *) buf + sizeof(u32);
len -= sizeof(u32);
}
}
static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
{
wlc_hw->band->CWmin = newmin;
bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
OBJADDR_SCR_SEL | S_DOT11_CWMIN);
(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
}
static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
{
wlc_hw->band->CWmax = newmax;
bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
OBJADDR_SCR_SEL | S_DOT11_CWMAX);
(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
}
void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
{
bool fastclk;
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
wlc_phy_bw_state_set(wlc_hw->band->pi, bw);
brcms_b_phy_reset(wlc_hw);
wlc_phy_init(wlc_hw->band->pi, wlc_phy_chanspec_get(wlc_hw->band->pi));
/* restore the clk */
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
static void brcms_b_upd_synthpu(struct brcms_hardware *wlc_hw)
{
u16 v;
struct brcms_c_info *wlc = wlc_hw->wlc;
/* update SYNTHPU_DLY */
if (BRCMS_ISLCNPHY(wlc->band))
v = SYNTHPU_DLY_LPPHY_US;
else if (BRCMS_ISNPHY(wlc->band) && (NREV_GE(wlc->band->phyrev, 3)))
v = SYNTHPU_DLY_NPHY_US;
else
v = SYNTHPU_DLY_BPHY_US;
brcms_b_write_shm(wlc_hw, M_SYNTHPU_DLY, v);
}
static void brcms_c_ucode_txant_set(struct brcms_hardware *wlc_hw)
{
u16 phyctl;
u16 phytxant = wlc_hw->bmac_phytxant;
u16 mask = PHY_TXC_ANT_MASK;
/* set the Probe Response frame phy control word */
phyctl = brcms_b_read_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS);
phyctl = (phyctl & ~mask) | phytxant;
brcms_b_write_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS, phyctl);
/* set the Response (ACK/CTS) frame phy control word */
phyctl = brcms_b_read_shm(wlc_hw, M_RSP_PCTLWD);
phyctl = (phyctl & ~mask) | phytxant;
brcms_b_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
}
static u16 brcms_b_ofdm_ratetable_offset(struct brcms_hardware *wlc_hw,
u8 rate)
{
uint i;
u8 plcp_rate = 0;
struct plcp_signal_rate_lookup {
u8 rate;
u8 signal_rate;
};
/* OFDM RATE sub-field of PLCP SIGNAL field, per 802.11 sec 17.3.4.1 */
const struct plcp_signal_rate_lookup rate_lookup[] = {
{BRCM_RATE_6M, 0xB},
{BRCM_RATE_9M, 0xF},
{BRCM_RATE_12M, 0xA},
{BRCM_RATE_18M, 0xE},
{BRCM_RATE_24M, 0x9},
{BRCM_RATE_36M, 0xD},
{BRCM_RATE_48M, 0x8},
{BRCM_RATE_54M, 0xC}
};
for (i = 0; i < ARRAY_SIZE(rate_lookup); i++) {
if (rate == rate_lookup[i].rate) {
plcp_rate = rate_lookup[i].signal_rate;
break;
}
}
/* Find the SHM pointer to the rate table entry by looking in the
* Direct-map Table
*/
return 2 * brcms_b_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
}
static void brcms_upd_ofdm_pctl1_table(struct brcms_hardware *wlc_hw)
{
u8 rate;
u8 rates[8] = {
BRCM_RATE_6M, BRCM_RATE_9M, BRCM_RATE_12M, BRCM_RATE_18M,
BRCM_RATE_24M, BRCM_RATE_36M, BRCM_RATE_48M, BRCM_RATE_54M
};
u16 entry_ptr;
u16 pctl1;
uint i;
if (!BRCMS_PHY_11N_CAP(wlc_hw->band))
return;
/* walk the phy rate table and update the entries */
for (i = 0; i < ARRAY_SIZE(rates); i++) {
rate = rates[i];
entry_ptr = brcms_b_ofdm_ratetable_offset(wlc_hw, rate);
/* read the SHM Rate Table entry OFDM PCTL1 values */
pctl1 =
brcms_b_read_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS);
/* modify the value */
pctl1 &= ~PHY_TXC1_MODE_MASK;
pctl1 |= (wlc_hw->hw_stf_ss_opmode << PHY_TXC1_MODE_SHIFT);
/* Update the SHM Rate Table entry OFDM PCTL1 values */
brcms_b_write_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS,
pctl1);
}
}
/* band-specific init */
static void brcms_b_bsinit(struct brcms_c_info *wlc, u16 chanspec)
{
struct brcms_hardware *wlc_hw = wlc->hw;
brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
wlc_hw->band->bandunit);
brcms_c_ucode_bsinit(wlc_hw);
wlc_phy_init(wlc_hw->band->pi, chanspec);
brcms_c_ucode_txant_set(wlc_hw);
/*
* cwmin is band-specific, update hardware
* with value for current band
*/
brcms_b_set_cwmin(wlc_hw, wlc_hw->band->CWmin);
brcms_b_set_cwmax(wlc_hw, wlc_hw->band->CWmax);
brcms_b_update_slot_timing(wlc_hw,
wlc_hw->band->bandtype == BRCM_BAND_5G ?
true : wlc_hw->shortslot);
/* write phytype and phyvers */
brcms_b_write_shm(wlc_hw, M_PHYTYPE, (u16) wlc_hw->band->phytype);
brcms_b_write_shm(wlc_hw, M_PHYVER, (u16) wlc_hw->band->phyrev);
/*
* initialize the txphyctl1 rate table since
* shmem is shared between bands
*/
brcms_upd_ofdm_pctl1_table(wlc_hw);
brcms_b_upd_synthpu(wlc_hw);
}
/* Perform a soft reset of the PHY PLL */
void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
~0, 0);
udelay(1);
ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
0x4, 0);
udelay(1);
ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
0x4, 4);
udelay(1);
ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
0x4, 0);
udelay(1);
}
/* light way to turn on phy clock without reset for NPHY only
* refer to brcms_b_core_phy_clk for full version
*/
void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
{
/* support(necessary for NPHY and HYPHY) only */
if (!BRCMS_ISNPHY(wlc_hw->band))
return;
if (ON == clk)
brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
else
brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
}
void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
{
if (ON == clk)
brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
else
brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
}
void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
{
struct brcms_phy_pub *pih = wlc_hw->band->pi;
u32 phy_bw_clkbits;
bool phy_in_reset = false;
brcms_dbg_info(wlc_hw->d11core, "wl%d: reset phy\n", wlc_hw->unit);
if (pih == NULL)
return;
phy_bw_clkbits = wlc_phy_clk_bwbits(wlc_hw->band->pi);
/* Specific reset sequence required for NPHY rev 3 and 4 */
if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
NREV_LE(wlc_hw->band->phyrev, 4)) {
/* Set the PHY bandwidth */
brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
udelay(1);
/* Perform a soft reset of the PHY PLL */
brcms_b_core_phypll_reset(wlc_hw);
/* reset the PHY */
brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
(SICF_PRST | SICF_PCLKE));
phy_in_reset = true;
} else {
brcms_b_core_ioctl(wlc_hw,
(SICF_PRST | SICF_PCLKE | SICF_BWMASK),
(SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
}
udelay(2);
brcms_b_core_phy_clk(wlc_hw, ON);
if (pih)
wlc_phy_anacore(pih, ON);
}
/* switch to and initialize new band */
static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
u16 chanspec) {
struct brcms_c_info *wlc = wlc_hw->wlc;
u32 macintmask;
/* Enable the d11 core before accessing it */
if (!bcma_core_is_enabled(wlc_hw->d11core)) {
bcma_core_enable(wlc_hw->d11core, 0);
brcms_c_mctrl_reset(wlc_hw);
}
macintmask = brcms_c_setband_inact(wlc, bandunit);
if (!wlc_hw->up)
return;
brcms_b_core_phy_clk(wlc_hw, ON);
/* band-specific initializations */
brcms_b_bsinit(wlc, chanspec);
/*
* If there are any pending software interrupt bits,
* then replace these with a harmless nonzero value
* so brcms_c_dpc() will re-enable interrupts when done.
*/
if (wlc->macintstatus)
wlc->macintstatus = MI_DMAINT;
/* restore macintmask */
brcms_intrsrestore(wlc->wl, macintmask);
/* ucode should still be suspended.. */
WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC) != 0);
}
static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
{
/* reject unsupported corerev */
if (!CONF_HAS(D11CONF, wlc_hw->corerev)) {
wiphy_err(wlc_hw->wlc->wiphy, "unsupported core rev %d\n",
wlc_hw->corerev);
return false;
}
return true;
}
/* Validate some board info parameters */
static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
{
uint boardrev = wlc_hw->boardrev;
/* 4 bits each for board type, major, minor, and tiny version */
uint brt = (boardrev & 0xf000) >> 12;
uint b0 = (boardrev & 0xf00) >> 8;
uint b1 = (boardrev & 0xf0) >> 4;
uint b2 = boardrev & 0xf;
/* boards from other vendors are always considered valid */
if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
return true;
/* do some boardrev sanity checks when boardvendor is Broadcom */
if (boardrev == 0)
return false;
if (boardrev <= 0xff)
return true;
if ((brt > 2) || (brt == 0) || (b0 > 9) || (b0 == 0) || (b1 > 9)
|| (b2 > 9))
return false;
return true;
}
static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_ALEN])
{
struct ssb_sprom *sprom = &wlc_hw->d11core->bus->sprom;
/* If macaddr exists, use it (Sromrev4, CIS, ...). */
if (!is_zero_ether_addr(sprom->il0mac)) {
memcpy(etheraddr, sprom->il0mac, 6);
return;
}
if (wlc_hw->_nbands > 1)
memcpy(etheraddr, sprom->et1mac, 6);
else
memcpy(etheraddr, sprom->il0mac, 6);
}
/* power both the pll and external oscillator on/off */
static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
{
brcms_dbg_info(wlc_hw->d11core, "wl%d: want %d\n", wlc_hw->unit, want);
/*
* dont power down if plldown is false or
* we must poll hw radio disable
*/
if (!want && wlc_hw->pllreq)
return;
wlc_hw->sbclk = want;
if (!wlc_hw->sbclk) {
wlc_hw->clk = false;
if (wlc_hw->band && wlc_hw->band->pi)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
}
}
/*
* Return true if the radio is disabled, otherwise false.
* The hw radio disable signal is an external pin; users activate it
* asynchronously. This function may be called when the driver is down
* and without a clock. It operates on different registers depending on
* corerev and boardflag.
*/
static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
{
bool v, clk, xtal;
u32 flags = 0;
xtal = wlc_hw->sbclk;
if (!xtal)
brcms_b_xtal(wlc_hw, ON);
/* may need to take core out of reset first */
clk = wlc_hw->clk;
if (!clk) {
/*
* The mac no longer enables phyclk automatically when the driver
* accesses phyregs through the mac. This can be skipped here since
* only mac registers are accessed below.
*/
if (D11REV_GE(wlc_hw->corerev, 18))
flags |= SICF_PCLKE;
/*
* TODO: test suspend/resume
*
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
bcma_core_enable(wlc_hw->d11core, flags);
brcms_c_mctrl_reset(wlc_hw);
}
v = ((bcma_read32(wlc_hw->d11core,
D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
/* put core back into reset */
if (!clk)
bcma_core_disable(wlc_hw->d11core, 0);
if (!xtal)
brcms_b_xtal(wlc_hw, OFF);
return v;
}
static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
{
struct dma_pub *di = wlc_hw->di[fifo];
return dma_rxreset(di);
}
/* d11 core reset
* ensure fast clock during reset
* reset dma
* reset d11 (out of reset)
* reset phy (out of reset)
* clear software macintstatus for a fresh new start
* one testing hack: wlc_hw->noreset will bypass the d11/phy reset
*/
void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
{
uint i;
bool fastclk;
if (flags == BRCMS_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
brcms_dbg_info(wlc_hw->d11core, "wl%d: core reset\n", wlc_hw->unit);
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/* reset the dma engines except first time thru */
if (bcma_core_is_enabled(wlc_hw->d11core)) {
for (i = 0; i < NFIFO; i++)
if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
brcms_err(wlc_hw->d11core, "wl%d: %s: "
"dma_txreset[%d]: cannot stop dma\n",
wlc_hw->unit, __func__, i);
if ((wlc_hw->di[RX_FIFO])
&& (!wlc_dma_rxreset(wlc_hw, RX_FIFO)))
brcms_err(wlc_hw->d11core, "wl%d: %s: dma_rxreset"
"[%d]: cannot stop dma\n",
wlc_hw->unit, __func__, RX_FIFO);
}
/* if noreset, just stop the psm and return */
if (wlc_hw->noreset) {
wlc_hw->wlc->macintstatus = 0; /* skip wl_dpc after down */
brcms_b_mctrl(wlc_hw, MCTL_PSM_RUN | MCTL_EN_MAC, 0);
return;
}
/*
* The mac no longer enables phyclk automatically when the driver
* accesses phyregs through the mac, AND phy_reset is skipped at an
* early stage when band->pi is invalid. Need to enable PHY CLK.
*/
if (D11REV_GE(wlc_hw->corerev, 18))
flags |= SICF_PCLKE;
/*
* reset the core
* In chips with PMU, the fastclk request goes through the d11 core
* reg 0x1e0, which is cleared by the core reset, so it has to be
* re-requested.
*
* This adds some delay and we can optimize it by also requesting
* fastclk through chipcommon during this period if necessary. But
* that has to be coordinated with other drivers (e.g. mips/arm) since
* they may touch chipcommon as well.
*/
wlc_hw->clk = false;
bcma_core_enable(wlc_hw->d11core, flags);
wlc_hw->clk = true;
if (wlc_hw->band && wlc_hw->band->pi)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
brcms_c_mctrl_reset(wlc_hw);
if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
brcms_b_phy_reset(wlc_hw);
/* turn on PHY_PLL */
brcms_b_core_phypll_ctl(wlc_hw, true);
/* clear sw intstatus */
wlc_hw->wlc->macintstatus = 0;
/* restore the clk setting */
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
/* txfifo sizes need to be modified (increased) since the newer cores
* have more memory.
*/
static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
{
struct bcma_device *core = wlc_hw->d11core;
u16 fifo_nu;
u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
u16 txfifo_def, txfifo_def1;
u16 txfifo_cmd;
/* tx fifos start at TXFIFO_START_BLK from the Base address */
/* sequence of operations: reset fifo, set fifo size, reset fifo */
for (fifo_nu = 0; fifo_nu < NFIFO; fifo_nu++) {
txfifo_endblk = txfifo_startblk + wlc_hw->xmtfifo_sz[fifo_nu];
txfifo_def = (txfifo_startblk & 0xff) |
(((txfifo_endblk - 1) & 0xff) << TXFIFO_FIFOTOP_SHIFT);
txfifo_def1 = ((txfifo_startblk >> 8) & 0x1) |
((((txfifo_endblk -
1) >> 8) & 0x1) << TXFIFO_FIFOTOP_SHIFT);
txfifo_cmd =
TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
}
/*
* need to propagate to shm location to be in sync since ucode/hw won't
* do this
*/
brcms_b_write_shm(wlc_hw, M_FIFOSIZE0,
wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]);
brcms_b_write_shm(wlc_hw, M_FIFOSIZE1,
wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]);
brcms_b_write_shm(wlc_hw, M_FIFOSIZE2,
((wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO] << 8) | wlc_hw->
xmtfifo_sz[TX_AC_BK_FIFO]));
brcms_b_write_shm(wlc_hw, M_FIFOSIZE3,
((wlc_hw->xmtfifo_sz[TX_ATIM_FIFO] << 8) | wlc_hw->
xmtfifo_sz[TX_BCMC_FIFO]));
}
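/*
* Illustrative sketch only: recovering a fifo's start and last block from
* the xmtfifodef/xmtfifodef1 values programmed above. This assumes
* TXFIFO_FIFOTOP_SHIFT is 8, i.e. the low byte of each register carries the
* start block, the high byte carries the last block, and the 9th address bit
* of each lives in xmtfifodef1.
*/
static inline void example_unpack_fifodef(u16 def, u16 def1,
u16 *startblk, u16 *lastblk)
{
*startblk = (def & 0xff) | ((def1 & 0x1) << 8);
*lastblk = ((def >> TXFIFO_FIFOTOP_SHIFT) & 0xff) |
(((def1 >> TXFIFO_FIFOTOP_SHIFT) & 0x1) << 8);
}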
/* This function is used for changing the tsf frac register
* If spur avoidance mode is off, the mac freq will be 80/120/160 MHz
* If spur avoidance mode is on1, the mac freq will be 82/123/164 MHz
* If spur avoidance mode is on2, the mac freq will be 84/126/168 MHz
* HTPHY formula is 2^26/freq(MHz), e.g.
* For spuron2 - 126 MHz -> 2^26/126 = 532610.0
* - 532610 = 0x82082 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x2082
* For spuron: 123 MHz -> 2^26/123 = 545600.5
* - 545601 = 0x85341 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x5341
* For spur off: 120 MHz -> 2^26/120 = 559240.5
* - 559241 = 0x88889 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x8889
*/
void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
struct bcma_device *core = wlc_hw->d11core;
if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43224) ||
(ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else { /* 120Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
}
} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
} else { /* 80Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
}
}
}
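/*
* Illustrative sketch only, not part of the driver: deriving the hard-coded
* tsf_clk_frac values above from the 2^26/freq(MHz) formula. The helper name
* is hypothetical; for 120 MHz it yields 0x88889, which splits into
* tsf_clk_frac_h = 0x8 and tsf_clk_frac_l = 0x8889 as written above.
*/
static inline void example_tsf_clk_frac(u32 mac_freq_mhz, u16 *frac_h,
u16 *frac_l)
{
/* 2^26 / freq, rounded to the nearest integer */
u32 frac = ((1u << 26) + mac_freq_mhz / 2) / mac_freq_mhz;
*frac_h = frac >> 16; /* e.g. 0x8 for 120 MHz */
*frac_l = frac & 0xffff; /* e.g. 0x8889 for 120 MHz */
}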
void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr)
{
memcpy(wlc->pub->cur_etheraddr, addr, sizeof(wlc->pub->cur_etheraddr));
wlc->bsscfg->type = BRCMS_TYPE_STATION;
}
void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
u8 *ssid, size_t ssid_len)
{
brcms_c_set_ssid(wlc, ssid, ssid_len);
memcpy(wlc->pub->cur_etheraddr, addr, sizeof(wlc->pub->cur_etheraddr));
memcpy(wlc->bsscfg->BSSID, bssid, sizeof(wlc->bsscfg->BSSID));
wlc->bsscfg->type = BRCMS_TYPE_AP;
brcms_b_mctrl(wlc->hw, MCTL_AP | MCTL_INFRA, MCTL_AP | MCTL_INFRA);
}
void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr)
{
memcpy(wlc->pub->cur_etheraddr, addr, sizeof(wlc->pub->cur_etheraddr));
wlc->bsscfg->type = BRCMS_TYPE_ADHOC;
brcms_b_mctrl(wlc->hw, MCTL_AP | MCTL_INFRA, 0);
}
/* Initialize GPIOs that are controlled by D11 core */
static void brcms_c_gpio_init(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 gc, gm;
/* use GPIO select 0 to get all gpio signals from the gpio out reg */
brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
/*
* Common GPIO setup:
* G0 = LED 0 = WLAN Activity
* G1 = LED 1 = WLAN 2.4 GHz Radio State
* G2 = LED 2 = WLAN 5 GHz Radio State
* G4 = radio disable input (HI enabled, LO disabled)
*/
gc = gm = 0;
/* Allocate GPIOs for mimo antenna diversity feature */
if (wlc_hw->antsel_type == ANTSEL_2x3) {
/* Enable antenna diversity, use 2x3 mode */
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
MHF3_ANTSEL_EN, BRCM_BAND_ALL);
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE,
MHF3_ANTSEL_MODE, BRCM_BAND_ALL);
/* init superswitch control */
wlc_phy_antsel_init(wlc_hw->band->pi, false);
} else if (wlc_hw->antsel_type == ANTSEL_2x4) {
gm |= gc |= (BOARD_GPIO_12 | BOARD_GPIO_13);
/*
* The board itself is powered by these GPIOs
* (when not sending pattern) so set them high
*/
bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
(BOARD_GPIO_12 | BOARD_GPIO_13));
bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
(BOARD_GPIO_12 | BOARD_GPIO_13));
/* Enable antenna diversity, use 2x4 mode */
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
MHF3_ANTSEL_EN, BRCM_BAND_ALL);
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE, 0,
BRCM_BAND_ALL);
/* Configure the desired clock to be 4Mhz */
brcms_b_write_shm(wlc_hw, M_ANTSEL_CLKDIV,
ANTSEL_CLKDIV_4MHZ);
}
/*
* gpio 9 controls the PA. ucode is responsible
* for wiggling the out and oe bits
*/
if (wlc_hw->boardflags & BFL_PACTRL)
gm |= gc |= BOARD_GPIO_PACTRL;
/* apply to gpiocontrol register */
bcma_chipco_gpio_control(&wlc_hw->d11core->bus->drv_cc, gm, gc);
}
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
const __le32 ucode[], const size_t nbytes)
{
struct bcma_device *core = wlc_hw->d11core;
uint i;
uint count;
brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
count = (nbytes / sizeof(u32));
bcma_write32(core, D11REGOFFS(objaddr),
OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
(void)bcma_read32(core, D11REGOFFS(objaddr));
for (i = 0; i < count; i++)
bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
}
static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
{
struct brcms_c_info *wlc;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
wlc = wlc_hw->wlc;
if (wlc_hw->ucode_loaded)
return;
if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band)) {
brcms_ucode_write(wlc_hw, ucode->bcm43xx_16_mimo,
ucode->bcm43xx_16_mimosz);
wlc_hw->ucode_loaded = true;
} else
brcms_err(wlc_hw->d11core,
"%s: wl%d: unsupported phy in corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
if (BRCMS_ISLCNPHY(wlc_hw->band)) {
brcms_ucode_write(wlc_hw, ucode->bcm43xx_24_lcn,
ucode->bcm43xx_24_lcnsz);
wlc_hw->ucode_loaded = true;
} else {
brcms_err(wlc_hw->d11core,
"%s: wl%d: unsupported phy in corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
}
}
}
void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant)
{
/* update sw state */
wlc_hw->bmac_phytxant = phytxant;
/* push to ucode if up */
if (!wlc_hw->up)
return;
brcms_c_ucode_txant_set(wlc_hw);
}
u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw)
{
return (u16) wlc_hw->wlc->stf->txant;
}
void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
{
wlc_hw->antsel_type = antsel_type;
/* Update the antsel type for phy module to use */
wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
}
static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
{
bool fatal = false;
uint unit;
uint intstatus, idx;
struct bcma_device *core = wlc_hw->d11core;
unit = wlc_hw->unit;
for (idx = 0; idx < NFIFO; idx++) {
/* read intstatus register and ignore any non-error bits */
intstatus =
bcma_read32(core,
D11REGOFFS(intctrlregs[idx].intstatus)) &
I_ERRORS;
if (!intstatus)
continue;
brcms_dbg_int(core, "wl%d: intstatus%d 0x%x\n",
unit, idx, intstatus);
if (intstatus & I_RO) {
brcms_err(core, "wl%d: fifo %d: receive fifo "
"overflow\n", unit, idx);
fatal = true;
}
if (intstatus & I_PC) {
brcms_err(core, "wl%d: fifo %d: descriptor error\n",
unit, idx);
fatal = true;
}
if (intstatus & I_PD) {
brcms_err(core, "wl%d: fifo %d: data error\n", unit,
idx);
fatal = true;
}
if (intstatus & I_DE) {
brcms_err(core, "wl%d: fifo %d: descriptor protocol "
"error\n", unit, idx);
fatal = true;
}
if (intstatus & I_RU)
brcms_err(core, "wl%d: fifo %d: receive descriptor "
"underflow\n", unit, idx);
if (intstatus & I_XU) {
brcms_err(core, "wl%d: fifo %d: transmit fifo "
"underflow\n", unit, idx);
fatal = true;
}
if (fatal) {
brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
break;
} else
bcma_write32(core,
D11REGOFFS(intctrlregs[idx].intstatus),
intstatus);
}
}
void brcms_c_intrson(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
wlc->macintmask = wlc->defmacintmask;
bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
if (!wlc_hw->clk)
return 0;
macintmask = wlc->macintmask; /* isr can still happen */
bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
udelay(1); /* ensure int line is no longer driven */
wlc->macintmask = 0;
/* return previous macintmask; resolve race between us and our isr */
return wlc->macintstatus ? 0 : macintmask;
}
void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
{
struct brcms_hardware *wlc_hw = wlc->hw;
if (!wlc_hw->clk)
return;
wlc->macintmask = macintmask;
bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
/* assumes that the d11 MAC is enabled */
static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
uint tx_fifo)
{
u8 fifo = 1 << tx_fifo;
/* Two clients of this code, 11h Quiet period and scanning. */
/* only suspend if not already suspended */
if ((wlc_hw->suspended_fifos & fifo) == fifo)
return;
/* force the core awake only if not already */
if (wlc_hw->suspended_fifos == 0)
brcms_c_ucode_wake_override_set(wlc_hw,
BRCMS_WAKE_OVERRIDE_TXFIFO);
wlc_hw->suspended_fifos |= fifo;
if (wlc_hw->di[tx_fifo]) {
/*
* Suspending AMPDU transmissions in the middle can cause
* underflow which may result in mismatch between ucode and
* driver so suspend the mac before suspending the FIFO
*/
if (BRCMS_PHY_11N_CAP(wlc_hw->band))
brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
dma_txsuspend(wlc_hw->di[tx_fifo]);
if (BRCMS_PHY_11N_CAP(wlc_hw->band))
brcms_c_enable_mac(wlc_hw->wlc);
}
}
static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
uint tx_fifo)
{
/* BMAC_NOTE: BRCMS_TX_FIFO_ENAB is done in brcms_c_dpc() for the DMA
* case but needs to be done here for PIO, otherwise the watchdog will
* catch the inconsistency and fire
*/
/* Two clients of this code, 11h Quiet period and scanning. */
if (wlc_hw->di[tx_fifo])
dma_txresume(wlc_hw->di[tx_fifo]);
/* allow core to sleep again */
if (wlc_hw->suspended_fifos == 0)
return;
wlc_hw->suspended_fifos &= ~(1 << tx_fifo);
if (wlc_hw->suspended_fifos == 0)
brcms_c_ucode_wake_override_clear(wlc_hw,
BRCMS_WAKE_OVERRIDE_TXFIFO);
}
/* precondition: requires the mac core to be enabled */
static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
u8 *ethaddr = wlc_hw->wlc->pub->cur_etheraddr;
if (mute_tx) {
/* suspend tx fifos */
brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_BK_FIFO);
brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
/* zero the address match register so we do not send ACKs */
brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, null_ether_addr);
} else {
/* resume tx fifos */
brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
brcms_b_tx_fifo_resume(wlc_hw, TX_CTL_FIFO);
brcms_b_tx_fifo_resume(wlc_hw, TX_AC_BK_FIFO);
brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
/* Restore address */
brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, ethaddr);
}
wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
if (mute_tx)
brcms_c_ucode_mute_override_set(wlc_hw);
else
brcms_c_ucode_mute_override_clear(wlc_hw);
}
void
brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
{
brcms_b_mute(wlc->hw, mute_tx);
}
/*
* Read and clear macintmask and macintstatus and intstatus registers.
* This routine should be called with interrupts off
* Return:
* -1 if brcms_deviceremoved(wlc) evaluates to true;
* 0 if the interrupt is not for us, or we are in some special cases;
* device interrupt status bits otherwise.
*/
static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
u32 macintstatus, mask;
/* macintstatus includes a DMA interrupt summary bit */
macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
mask = in_isr ? wlc->macintmask : wlc->defmacintmask;
trace_brcms_macintstatus(&core->dev, in_isr, macintstatus, mask);
/* detect cardbus removed, in power down(suspend) and in reset */
if (brcms_deviceremoved(wlc))
return -1;
/* brcms_deviceremoved() succeeds even when the core is still resetting,
* handle that case here.
*/
if (macintstatus == 0xffffffff)
return 0;
/* defer unsolicited interrupts */
macintstatus &= mask;
/* if not for us */
if (macintstatus == 0)
return 0;
/* turn off the interrupts */
bcma_write32(core, D11REGOFFS(macintmask), 0);
(void)bcma_read32(core, D11REGOFFS(macintmask));
wlc->macintmask = 0;
/* clear device interrupts */
bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
/* MI_DMAINT is an indication of a non-zero intstatus */
if (macintstatus & MI_DMAINT)
/*
* the only fifo interrupt enabled is I_RI in
* RX_FIFO. If MI_DMAINT is set, assume I_RI
* is set and clear the interrupt.
*/
bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
DEF_RXINTMASK);
return macintstatus;
}
/* Update wlc->macintstatus and wlc->intstatus[]. */
/* Return true if they are updated successfully. false otherwise */
bool brcms_c_intrsupd(struct brcms_c_info *wlc)
{
u32 macintstatus;
/* read and clear macintstatus and intstatus registers */
macintstatus = wlc_intstatus(wlc, false);
/* device is removed */
if (macintstatus == 0xffffffff)
return false;
/* update interrupt status in software */
wlc->macintstatus |= macintstatus;
return true;
}
/*
* First-level interrupt processing.
* Return true if this was our interrupt
* and if further brcms_c_dpc() processing is required,
* false otherwise.
*/
bool brcms_c_isr(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintstatus;
if (!wlc_hw->up || !wlc->macintmask)
return false;
/* read and clear macintstatus and intstatus registers */
macintstatus = wlc_intstatus(wlc, true);
if (macintstatus == 0xffffffff) {
brcms_err(wlc_hw->d11core,
"DEVICEREMOVED detected in the ISR code path\n");
return false;
}
/* it is not for us */
if (macintstatus == 0)
return false;
/* save interrupt status bits */
wlc->macintstatus = macintstatus;
return true;
}
void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
wlc_hw->band->bandunit);
/*
* Track overlapping suspend requests
*/
wlc_hw->mac_suspend_depth++;
if (wlc_hw->mac_suspend_depth > 1)
return;
/* force the core awake */
brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return;
}
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(!(mc & MCTL_PSM_RUN));
WARN_ON(!(mc & MCTL_EN_MAC));
mi = bcma_read32(core, D11REGOFFS(macintstatus));
if (mi == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return;
}
WARN_ON(mi & MI_MACSSPNDD);
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
BRCMS_MAX_MAC_SUSPEND);
if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
brcms_err(core, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
" and MI_MACSSPNDD is still not on.\n",
wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
brcms_err(core, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
"psm_brc 0x%04x\n", wlc_hw->unit,
bcma_read32(core, D11REGOFFS(psmdebug)),
bcma_read32(core, D11REGOFFS(phydebug)),
bcma_read16(core, D11REGOFFS(psm_brc)));
}
mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return;
}
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(!(mc & MCTL_PSM_RUN));
WARN_ON(mc & MCTL_EN_MAC);
}
void brcms_c_enable_mac(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
wlc->band->bandunit);
/*
* Track overlapping suspend requests
*/
wlc_hw->mac_suspend_depth--;
if (wlc_hw->mac_suspend_depth > 0)
return;
mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(mc & MCTL_EN_MAC);
WARN_ON(!(mc & MCTL_PSM_RUN));
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(!(mc & MCTL_EN_MAC));
WARN_ON(!(mc & MCTL_PSM_RUN));
mi = bcma_read32(core, D11REGOFFS(macintstatus));
WARN_ON(mi & MI_MACSSPNDD);
brcms_c_ucode_wake_override_clear(wlc_hw,
BRCMS_WAKE_OVERRIDE_MACSUSPEND);
}
void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
{
wlc_hw->hw_stf_ss_opmode = stf_mode;
if (wlc_hw->clk)
brcms_upd_ofdm_pctl1_table(wlc_hw);
}
static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
{
struct bcma_device *core = wlc_hw->d11core;
u32 w, val;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
/* Validate dchip register access */
bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
(void)bcma_read32(core, D11REGOFFS(objaddr));
w = bcma_read32(core, D11REGOFFS(objdata));
/* Can we write and read back a 32bit register? */
bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
(void)bcma_read32(core, D11REGOFFS(objaddr));
bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
(void)bcma_read32(core, D11REGOFFS(objaddr));
val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0xaa5555aa) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0xaa5555aa\n", wlc_hw->unit, val);
return false;
}
bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
(void)bcma_read32(core, D11REGOFFS(objaddr));
bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
(void)bcma_read32(core, D11REGOFFS(objaddr));
val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0x55aaaa55) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0x55aaaa55\n", wlc_hw->unit, val);
return false;
}
bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
(void)bcma_read32(core, D11REGOFFS(objaddr));
bcma_write32(core, D11REGOFFS(objdata), w);
/* clear CFPStart */
bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
w = bcma_read32(core, D11REGOFFS(maccontrol));
if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
(w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
"0x%x, expected 0x%x or 0x%x\n", wlc_hw->unit, w,
(MCTL_IHR_EN | MCTL_WAKE),
(MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE));
return false;
}
return true;
}
#define PHYPLL_WAIT_US 100000
void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
{
struct bcma_device *core = wlc_hw->d11core;
u32 tmp;
brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
tmp = 0;
if (on) {
if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
bcma_set32(core, D11REGOFFS(clk_ctl_st),
CCS_ERSRC_REQ_HT |
CCS_ERSRC_REQ_D11PLL |
CCS_ERSRC_REQ_PHYPLL);
SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
PHYPLL_WAIT_US);
tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
brcms_err(core, "%s: turn on PHY PLL failed\n",
__func__);
} else {
bcma_set32(core, D11REGOFFS(clk_ctl_st),
tmp | CCS_ERSRC_REQ_D11PLL |
CCS_ERSRC_REQ_PHYPLL);
SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL)) !=
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
if ((tmp &
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
!=
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
brcms_err(core, "%s: turn on PHY PLL failed\n",
__func__);
}
} else {
/*
* Since the PLL may be shared, other cores can still
* be requesting it; so we'll deassert the request but
* not wait for status to comply.
*/
bcma_mask32(core, D11REGOFFS(clk_ctl_st),
~CCS_ERSRC_REQ_PHYPLL);
(void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
}
}
static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
{
bool dev_gone;
brcms_dbg_info(wlc_hw->d11core, "wl%d: disable core\n", wlc_hw->unit);
dev_gone = brcms_deviceremoved(wlc_hw->wlc);
if (dev_gone)
return;
if (wlc_hw->noreset)
return;
/* radio off */
wlc_phy_switch_radio(wlc_hw->band->pi, OFF);
/* turn off analog core */
wlc_phy_anacore(wlc_hw->band->pi, OFF);
/* turn off PHYPLL to save power */
brcms_b_core_phypll_ctl(wlc_hw, false);
wlc_hw->clk = false;
bcma_core_disable(wlc_hw->d11core, 0);
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
}
static void brcms_c_flushqueues(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
uint i;
/* free any posted tx packets */
for (i = 0; i < NFIFO; i++) {
if (wlc_hw->di[i]) {
dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL);
if (i < TX_BCMC_FIFO)
ieee80211_wake_queue(wlc->pub->ieee_hw,
brcms_fifo_to_ac(i));
}
}
/* free any posted rx packets */
dma_rxreclaim(wlc_hw->di[RX_FIFO]);
}
static u16
brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
{
struct bcma_device *core = wlc_hw->d11core;
u16 objoff = D11REGOFFS(objdata);
bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
(void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
objoff += 2;
return bcma_read16(core, objoff);
}
static void
brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
u32 sel)
{
struct bcma_device *core = wlc_hw->d11core;
u16 objoff = D11REGOFFS(objdata);
bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
(void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
objoff += 2;
bcma_wflush16(core, objoff, v);
}
/*
* Read a single u16 from shared memory.
* SHM 'offset' needs to be an even address
*/
u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset)
{
return brcms_b_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL);
}
/*
* Write a single u16 to shared memory.
* SHM 'offset' needs to be an even address
*/
void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v)
{
brcms_b_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
}
/*
* Copy a buffer to shared memory of specified type .
* SHM 'offset' needs to be an even address and
* Buffer length 'len' must be an even number of bytes
* 'sel' selects the type of memory
*/
void
brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
const void *buf, int len, u32 sel)
{
u16 v;
const u8 *p = (const u8 *)buf;
int i;
if (len <= 0 || (offset & 1) || (len & 1))
return;
for (i = 0; i < len; i += 2) {
v = p[i] | (p[i + 1] << 8);
brcms_b_write_objmem(wlc_hw, offset + i, v, sel);
}
}
/*
* Copy a piece of shared memory of specified type to a buffer .
* SHM 'offset' needs to be an even address and
* Buffer length 'len' must be an even number of bytes
* 'sel' selects the type of memory
*/
void
brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset, void *buf,
int len, u32 sel)
{
u16 v;
u8 *p = (u8 *) buf;
int i;
if (len <= 0 || (offset & 1) || (len & 1))
return;
for (i = 0; i < len; i += 2) {
v = brcms_b_read_objmem(wlc_hw, offset + i, sel);
p[i] = v & 0xFF;
p[i + 1] = (v >> 8) & 0xFF;
}
}
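/*
* Illustrative sketch only: the objaddr register selects a 32-bit word
* (offset >> 2) and bit 1 of the offset picks the halfword within objdata,
* which is why SHM offsets passed to brcms_b_read_shm()/brcms_b_write_shm()
* must be even. The helper below is hypothetical and assumes byte_offset is
* a multiple of 4, so both halfword reads land in the same objaddr word and
* combine little-endian, matching brcms_b_copyfrom_objmem() above.
*/
static inline u32 example_shm_read_u32(struct brcms_hardware *wlc_hw,
uint byte_offset)
{
u16 lo = brcms_b_read_shm(wlc_hw, byte_offset);
u16 hi = brcms_b_read_shm(wlc_hw, byte_offset + 2);
return lo | ((u32)hi << 16);
}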
/* Copy a buffer to shared memory.
* SHM 'offset' needs to be an even address and
* Buffer length 'len' must be an even number of bytes
*/
static void brcms_c_copyto_shm(struct brcms_c_info *wlc, uint offset,
const void *buf, int len)
{
brcms_b_copyto_objmem(wlc->hw, offset, buf, len, OBJADDR_SHM_SEL);
}
static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
u16 SRL, u16 LRL)
{
wlc_hw->SRL = SRL;
wlc_hw->LRL = LRL;
/* write retry limit to SCR, shouldn't need to suspend */
if (wlc_hw->up) {
bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
(void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
}
}
static void brcms_b_pllreq(struct brcms_hardware *wlc_hw, bool set, u32 req_bit)
{
if (set) {
if (mboolisset(wlc_hw->pllreq, req_bit))
return;
mboolset(wlc_hw->pllreq, req_bit);
if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
if (!wlc_hw->sbclk)
brcms_b_xtal(wlc_hw, ON);
}
} else {
if (!mboolisset(wlc_hw->pllreq, req_bit))
return;
mboolclr(wlc_hw->pllreq, req_bit);
if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
if (wlc_hw->sbclk)
brcms_b_xtal(wlc_hw, OFF);
}
}
}
static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
{
wlc_hw->antsel_avail = antsel_avail;
}
/*
* conditions under which the PM bit should be set in outgoing frames
* and STAY_AWAKE is meaningful
*/
static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
{
/* not supporting PS so always return false for now */
return false;
}
static void brcms_c_statsupd(struct brcms_c_info *wlc)
{
int i;
struct macstat macstats;
#ifdef DEBUG
u16 delta;
u16 rxf0ovfl;
u16 txfunfl[NFIFO];
#endif /* DEBUG */
/* if the driver is down, it makes no sense to update stats */
if (!wlc->pub->up)
return;
#ifdef DEBUG
/* save last rx fifo 0 overflow count */
rxf0ovfl = wlc->core->macstat_snapshot->rxf0ovfl;
/* save last tx fifo underflow count */
for (i = 0; i < NFIFO; i++)
txfunfl[i] = wlc->core->macstat_snapshot->txfunfl[i];
#endif /* DEBUG */
/* Read mac stats from contiguous shared memory */
brcms_b_copyfrom_objmem(wlc->hw, M_UCODE_MACSTAT, &macstats,
sizeof(struct macstat), OBJADDR_SHM_SEL);
#ifdef DEBUG
/* check for rx fifo 0 overflow */
delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
if (delta)
brcms_err(wlc->hw->d11core, "wl%d: %u rx fifo 0 overflows!\n",
wlc->pub->unit, delta);
/* check for tx fifo underflows */
for (i = 0; i < NFIFO; i++) {
delta =
(u16) (wlc->core->macstat_snapshot->txfunfl[i] -
txfunfl[i]);
if (delta)
brcms_err(wlc->hw->d11core,
"wl%d: %u tx fifo %d underflows!\n",
wlc->pub->unit, delta, i);
}
#endif /* DEBUG */
/* merge counters from dma module */
for (i = 0; i < NFIFO; i++) {
if (wlc->hw->di[i])
dma_counterreset(wlc->hw->di[i]);
}
}
static void brcms_b_reset(struct brcms_hardware *wlc_hw)
{
/* reset the core */
if (!brcms_deviceremoved(wlc_hw->wlc))
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
/* purge the dma rings */
brcms_c_flushqueues(wlc_hw->wlc);
}
void brcms_c_reset(struct brcms_c_info *wlc)
{
brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
/* slurp up hw mac counters before core reset */
brcms_c_statsupd(wlc);
/* reset our snapshot of macstat counters */
memset(wlc->core->macstat_snapshot, 0, sizeof(struct macstat));
brcms_b_reset(wlc->hw);
}
void brcms_c_init_scb(struct scb *scb)
{
int i;
memset(scb, 0, sizeof(struct scb));
scb->flags = SCB_WMECAP | SCB_HTCAP;
for (i = 0; i < NUMPRIO; i++) {
scb->seqnum[i] = 0;
scb->seqctl[i] = 0xFFFF;
}
scb->seqctl_nonqos = 0xFFFF;
scb->magic = SCB_MAGIC;
}
/* d11 core init
* reset PSM
* download ucode/PCM
* let ucode run to suspended
* download ucode inits
* config other core registers
* init dma
*/
static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
u32 sflags;
u32 bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
int err = 0;
u16 buf[NFIFO];
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
brcms_dbg_info(core, "wl%d: core init\n", wlc_hw->unit);
/* reset PSM */
brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
brcms_ucode_download(wlc_hw);
/*
* FIFOSZ fixup: the driver wants to control the fifo allocation.
*/
fifosz_fixup = true;
/* let the PSM run to the suspended state, set mode to BSS STA */
bcma_write32(core, D11REGOFFS(macintstatus), -1);
brcms_b_mctrl(wlc_hw, ~0,
(MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
/* wait for ucode to self-suspend after auto-init */
SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
MI_MACSSPNDD) == 0), 1000 * 1000);
if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
brcms_err(core, "wl%d: wlc_coreinit: ucode did not self-"
"suspend!\n", wlc_hw->unit);
brcms_c_gpio_init(wlc);
sflags = bcma_aread32(core, BCMA_IOST);
if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
brcms_c_write_inits(wlc_hw, ucode->d11n0initvals16);
else
brcms_err(core, "%s: wl%d: unsupported phy in corerev"
" %d\n", __func__, wlc_hw->unit,
wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
if (BRCMS_ISLCNPHY(wlc_hw->band))
brcms_c_write_inits(wlc_hw, ucode->d11lcn0initvals24);
else
brcms_err(core, "%s: wl%d: unsupported phy in corerev"
" %d\n", __func__, wlc_hw->unit,
wlc_hw->corerev);
} else {
brcms_err(core, "%s: wl%d: unsupported corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
}
/* For old ucode, txfifo sizes need to be modified (increased) */
if (fifosz_fixup)
brcms_b_corerev_fifofixup(wlc_hw);
/* check txfifo allocations match between ucode and driver */
buf[TX_AC_BE_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE0);
if (buf[TX_AC_BE_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]) {
i = TX_AC_BE_FIFO;
err = -1;
}
buf[TX_AC_VI_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE1);
if (buf[TX_AC_VI_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]) {
i = TX_AC_VI_FIFO;
err = -1;
}
buf[TX_AC_BK_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE2);
buf[TX_AC_VO_FIFO] = (buf[TX_AC_BK_FIFO] >> 8) & 0xff;
buf[TX_AC_BK_FIFO] &= 0xff;
if (buf[TX_AC_BK_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BK_FIFO]) {
i = TX_AC_BK_FIFO;
err = -1;
}
if (buf[TX_AC_VO_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO]) {
i = TX_AC_VO_FIFO;
err = -1;
}
buf[TX_BCMC_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE3);
buf[TX_ATIM_FIFO] = (buf[TX_BCMC_FIFO] >> 8) & 0xff;
buf[TX_BCMC_FIFO] &= 0xff;
if (buf[TX_BCMC_FIFO] != wlc_hw->xmtfifo_sz[TX_BCMC_FIFO]) {
i = TX_BCMC_FIFO;
err = -1;
}
if (buf[TX_ATIM_FIFO] != wlc_hw->xmtfifo_sz[TX_ATIM_FIFO]) {
i = TX_ATIM_FIFO;
err = -1;
}
if (err != 0)
brcms_err(core, "wlc_coreinit: txfifo mismatch: ucode size %d"
" driver size %d index %d\n", buf[i],
wlc_hw->xmtfifo_sz[i], i);
/* make sure we can still talk to the mac */
WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
/* band-specific inits done by wlc_bsinit() */
/* Set up frame burst size and antenna swap threshold init values */
brcms_b_write_shm(wlc_hw, M_MBURST_SIZE, MAXTXFRAMEBURST);
brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
/* enable one rx interrupt per received frame */
bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
/* set the station mode (BSS STA) */
brcms_b_mctrl(wlc_hw,
(MCTL_INFRA | MCTL_DISCARD_PMQ | MCTL_AP),
(MCTL_INFRA | MCTL_DISCARD_PMQ));
/* set up Beacon interval */
bcnint_us = 0x8000 << 10;
bcma_write32(core, D11REGOFFS(tsf_cfprep),
(bcnint_us << CFPREP_CBI_SHIFT));
bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
/* write interrupt mask */
bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
DEF_RXINTMASK);
/* allow the MAC to control the PHY clock (dynamic on/off) */
brcms_b_macphyclk_set(wlc_hw, ON);
/* program dynamic clock control fast powerup delay register */
wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
/* tell the ucode the corerev */
brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
/* tell the ucode MAC capabilities */
brcms_b_write_shm(wlc_hw, M_MACHW_CAP_L,
(u16) (wlc_hw->machwcap & 0xffff));
brcms_b_write_shm(wlc_hw, M_MACHW_CAP_H,
(u16) ((wlc_hw->
machwcap >> 16) & 0xffff));
/* write retry limits to SCR, this done after PSM init */
bcma_write32(core, D11REGOFFS(objaddr),
OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
(void)bcma_read32(core, D11REGOFFS(objaddr));
bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
bcma_write32(core, D11REGOFFS(objaddr),
OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
(void)bcma_read32(core, D11REGOFFS(objaddr));
bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
/* write rate fallback retry limits */
brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
/* init the tx dma engines */
for (i = 0; i < NFIFO; i++) {
if (wlc_hw->di[i])
dma_txinit(wlc_hw->di[i]);
}
/* init the rx dma engine(s) and post receive buffers */
dma_rxinit(wlc_hw->di[RX_FIFO]);
dma_rxfill(wlc_hw->di[RX_FIFO]);
}
static void brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec)
{
u32 macintmask;
bool fastclk;
struct brcms_c_info *wlc = wlc_hw->wlc;
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
/* set up the specified band and chanspec */
brcms_c_setxband(wlc_hw, chspec_bandunit(chanspec));
wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);
/* do one-time phy inits and calibration */
wlc_phy_cal_init(wlc_hw->band->pi);
/* core-specific initialization */
brcms_b_coreinit(wlc);
/* band-specific inits */
brcms_b_bsinit(wlc, chanspec);
/* restore macintmask */
brcms_intrsrestore(wlc->wl, macintmask);
/* seed wake_override with BRCMS_WAKE_OVERRIDE_MACSUSPEND since the mac
* is suspended and brcms_c_enable_mac() will clear this override bit.
*/
mboolset(wlc_hw->wake_override, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
/*
* initialize mac_suspend_depth to 1 to match ucode
* initial suspended state
*/
wlc_hw->mac_suspend_depth = 1;
/* restore the clk */
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
static void brcms_c_set_phy_chanspec(struct brcms_c_info *wlc,
u16 chanspec)
{
/* Save our copy of the chanspec */
wlc->chanspec = chanspec;
/* Set the chanspec and power limits for this locale */
brcms_c_channel_set_chanspec(wlc->cmi, chanspec, BRCMS_TXPWR_MAX);
if (wlc->stf->ss_algosel_auto)
brcms_c_stf_ss_algo_channel_get(wlc, &wlc->stf->ss_algo_channel,
chanspec);
brcms_c_stf_ss_update(wlc, wlc->band);
}
static void
brcms_default_rateset(struct brcms_c_info *wlc, struct brcms_c_rateset *rs)
{
brcms_c_rateset_default(rs, NULL, wlc->band->phytype,
wlc->band->bandtype, false, BRCMS_RATE_MASK_FULL,
(bool) (wlc->pub->_n_enab & SUPPORT_11N),
brcms_chspec_bw(wlc->default_bss->chanspec),
wlc->stf->txstreams);
}
/* derive wlc->band->basic_rate[] table from 'rateset' */
static void brcms_c_rate_lookup_init(struct brcms_c_info *wlc,
struct brcms_c_rateset *rateset)
{
u8 rate;
u8 mandatory;
u8 cck_basic = 0;
u8 ofdm_basic = 0;
u8 *br = wlc->band->basic_rate;
uint i;
/* incoming rates are in 500kbps units as in 802.11 Supported Rates */
memset(br, 0, BRCM_MAXRATE + 1);
/* For each basic rate in the rates list, make an entry in the
* best basic lookup.
*/
for (i = 0; i < rateset->count; i++) {
/* only make an entry for a basic rate */
if (!(rateset->rates[i] & BRCMS_RATE_FLAG))
continue;
/* mask off basic bit */
rate = (rateset->rates[i] & BRCMS_RATE_MASK);
if (rate > BRCM_MAXRATE) {
brcms_err(wlc->hw->d11core, "brcms_c_rate_lookup_init: "
"invalid rate 0x%X in rate set\n",
rateset->rates[i]);
continue;
}
br[rate] = rate;
}
/* The rate lookup table now has non-zero entries for each
* basic rate, equal to the basic rate: br[basicN] = basicN
*
* To look up the best basic rate corresponding to any
* particular rate, code can use the basic_rate table
* like this
*
* basic_rate = wlc->band->basic_rate[tx_rate]
*
* Make sure there is a best basic rate entry for
* every rate by walking up the table from low rates
* to high, filling in holes in the lookup table
*/
for (i = 0; i < wlc->band->hw_rateset.count; i++) {
rate = wlc->band->hw_rateset.rates[i];
if (br[rate] != 0) {
/* This rate is a basic rate.
* Keep track of the best basic rate so far by
* modulation type.
*/
if (is_ofdm_rate(rate))
ofdm_basic = rate;
else
cck_basic = rate;
continue;
}
/* This rate is not a basic rate so figure out the
* best basic rate less than this rate and fill in
* the hole in the table
*/
br[rate] = is_ofdm_rate(rate) ? ofdm_basic : cck_basic;
if (br[rate] != 0)
continue;
if (is_ofdm_rate(rate)) {
/*
* In 11g and 11a, the OFDM mandatory rates
* are 6, 12, and 24 Mbps
*/
if (rate >= BRCM_RATE_24M)
mandatory = BRCM_RATE_24M;
else if (rate >= BRCM_RATE_12M)
mandatory = BRCM_RATE_12M;
else
mandatory = BRCM_RATE_6M;
} else {
/* In 11b, all CCK rates are mandatory 1 - 11 Mbps */
mandatory = rate;
}
br[rate] = mandatory;
}
}
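/*
* Illustrative sketch only: using the lookup table built above. Rates are in
* 500kbps units, so with 1 Mbps CCK (2) and 6 Mbps OFDM (12) marked basic, a
* 5.5 Mbps CCK tx rate (11) maps to the best CCK basic rate at or below it
* and a 54 Mbps OFDM tx rate (108) maps to the best OFDM basic rate at or
* below it. This mirrors what the driver's brcms_basic_rate() lookup does.
*/
static inline u8 example_best_basic_rate(struct brcms_c_info *wlc, u8 tx_rate)
{
/* tx_rate is in 500kbps units and must not exceed BRCM_MAXRATE */
return wlc->band->basic_rate[tx_rate & BRCMS_RATE_MASK];
}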
static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
u16 chanspec)
{
struct brcms_c_rateset default_rateset;
uint parkband;
uint i, band_order[2];
/*
* We might have been bandlocked during down and the chip
* power-cycled (hibernate). Figure out the right band to park on
*/
if (wlc->bandlocked || wlc->pub->_nbands == 1) {
/* updated in brcms_c_bandlock() */
parkband = wlc->band->bandunit;
band_order[0] = band_order[1] = parkband;
} else {
/* park on the band of the specified chanspec */
parkband = chspec_bandunit(chanspec);
/* order so that parkband initializes last */
band_order[0] = parkband ^ 1;
band_order[1] = parkband;
}
/* make each band operational, software state init */
for (i = 0; i < wlc->pub->_nbands; i++) {
uint j = band_order[i];
wlc->band = wlc->bandstate[j];
brcms_default_rateset(wlc, &default_rateset);
/* fill in hw_rate */
brcms_c_rateset_filter(&default_rateset, &wlc->band->hw_rateset,
false, BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
(bool) (wlc->pub->_n_enab & SUPPORT_11N));
/* init basic rate lookup */
brcms_c_rate_lookup_init(wlc, &default_rateset);
}
/* sync up phy/radio chanspec */
brcms_c_set_phy_chanspec(wlc, chanspec);
}
/*
* Set or clear filtering related maccontrol bits based on
* specified filter flags
*/
void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
{
u32 promisc_bits = 0;
wlc->filter_flags = filter_flags;
if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
promisc_bits |= MCTL_PROMISC;
if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
promisc_bits |= MCTL_BCNS_PROMISC;
if (filter_flags & FIF_FCSFAIL)
promisc_bits |= MCTL_KEEPBADFCS;
if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
promisc_bits |= MCTL_KEEPCONTROL;
brcms_b_mctrl(wlc->hw,
MCTL_PROMISC | MCTL_BCNS_PROMISC |
MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
promisc_bits);
}
/*
* ucode, hwmac update
* Channel dependent updates for ucode and hw
*/
static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
{
/* enable or disable any active IBSSs depending on whether or not
* we are on the home channel
*/
if (wlc->home_chanspec == wlc_phy_chanspec_get(wlc->band->pi)) {
if (wlc->pub->associated) {
/*
* BMAC_NOTE: This is something that should be fixed
* in ucode inits. I think that the ucode inits set
* up the bcn templates and shm values with a bogus
* beacon. This should not be done in the inits. If
* ucode needs to set up a beacon for testing, the
* test routines should write it down, not expect the
* inits to populate a bogus beacon.
*/
if (BRCMS_PHY_11N_CAP(wlc->band))
brcms_b_write_shm(wlc->hw,
M_BCN_TXTSF_OFFSET, 0);
}
} else {
/* disable an active IBSS if we are not on the home channel */
}
}
static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
u8 basic_rate)
{
u8 phy_rate, index;
u8 basic_phy_rate, basic_index;
u16 dir_table, basic_table;
u16 basic_ptr;
/* Shared memory address for the table we are reading */
dir_table = is_ofdm_rate(basic_rate) ? M_RT_DIRMAP_A : M_RT_DIRMAP_B;
/* Shared memory address for the table we are writing */
basic_table = is_ofdm_rate(rate) ? M_RT_BBRSMAP_A : M_RT_BBRSMAP_B;
/*
* for a given rate, the LS-nibble of the PLCP SIGNAL field is
* the index into the rate table.
*/
phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
basic_phy_rate = rate_info[basic_rate] & BRCMS_RATE_MASK;
index = phy_rate & 0xf;
basic_index = basic_phy_rate & 0xf;
/* Find the SHM pointer to the ACK rate entry by looking in the
* Direct-map Table
*/
basic_ptr = brcms_b_read_shm(wlc->hw, (dir_table + basic_index * 2));
/* Update the SHM BSS-basic-rate-set mapping table with the pointer
* to the correct basic rate for the given incoming rate
*/
brcms_b_write_shm(wlc->hw, (basic_table + index * 2), basic_ptr);
}
static const struct brcms_c_rateset *
brcms_c_rateset_get_hwrs(struct brcms_c_info *wlc)
{
const struct brcms_c_rateset *rs_dflt;
if (BRCMS_PHY_11N_CAP(wlc->band)) {
if (wlc->band->bandtype == BRCM_BAND_5G)
rs_dflt = &ofdm_mimo_rates;
else
rs_dflt = &cck_ofdm_mimo_rates;
} else if (wlc->band->gmode)
rs_dflt = &cck_ofdm_rates;
else
rs_dflt = &cck_rates;
return rs_dflt;
}
static void brcms_c_set_ratetable(struct brcms_c_info *wlc)
{
const struct brcms_c_rateset *rs_dflt;
struct brcms_c_rateset rs;
u8 rate, basic_rate;
uint i;
rs_dflt = brcms_c_rateset_get_hwrs(wlc);
brcms_c_rateset_copy(rs_dflt, &rs);
brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
/* walk the phy rate table and update SHM basic rate lookup table */
for (i = 0; i < rs.count; i++) {
rate = rs.rates[i] & BRCMS_RATE_MASK;
/* for a given rate brcms_basic_rate returns the rate at
* which a response ACK/CTS should be sent.
*/
basic_rate = brcms_basic_rate(wlc, rate);
if (basic_rate == 0)
/* This should only happen if we are using a
* restricted rateset.
*/
basic_rate = rs.rates[0] & BRCMS_RATE_MASK;
brcms_c_write_rate_shm(wlc, rate, basic_rate);
}
}
/* band-specific init */
static void brcms_c_bsinit(struct brcms_c_info *wlc)
{
brcms_dbg_info(wlc->hw->d11core, "wl%d: bandunit %d\n",
wlc->pub->unit, wlc->band->bandunit);
/* write ucode ACK/CTS rate table */
brcms_c_set_ratetable(wlc);
/* update some band specific mac configuration */
brcms_c_ucode_mac_upd(wlc);
/* init antenna selection */
brcms_c_antsel_init(wlc->asi);
}
/* formula: IDLE_BUSY_RATIO_X_16 = (100-duty_cycle)/duty_cycle*16 */
static int
brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
bool writeToShm)
{
int idle_busy_ratio_x_16 = 0;
uint offset =
isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
M_TX_IDLE_BUSY_RATIO_X_16_CCK;
if (duty_cycle > 100 || duty_cycle < 0) {
brcms_err(wlc->hw->d11core,
"wl%d: duty cycle value off limit\n",
wlc->pub->unit);
return -EINVAL;
}
if (duty_cycle)
idle_busy_ratio_x_16 = (100 - duty_cycle) * 16 / duty_cycle;
/* Only write to shared memory when wl is up */
if (writeToShm)
brcms_b_write_shm(wlc->hw, offset, (u16) idle_busy_ratio_x_16);
if (isOFDM)
wlc->tx_duty_cycle_ofdm = (u16) duty_cycle;
else
wlc->tx_duty_cycle_cck = (u16) duty_cycle;
return 0;
}
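/*
* Illustrative sketch only: the IDLE_BUSY_RATIO_X_16 value written to shared
* memory above for a few duty-cycle settings, following the
* (100 - duty_cycle) * 16 / duty_cycle formula. A 50% duty cycle yields 16,
* 80% yields 4 and 100% yields 0 (no forced idle time).
*/
static inline int example_idle_busy_ratio_x_16(int duty_cycle)
{
if (duty_cycle <= 0 || duty_cycle > 100)
return -EINVAL;
return (100 - duty_cycle) * 16 / duty_cycle;
}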
/* push sw hps and wake state through hardware */
static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
{
u32 v1, v2;
bool hps;
bool awake_before;
hps = brcms_c_ps_allowed(wlc);
brcms_dbg_mac80211(wlc->hw->d11core, "wl%d: hps %d\n", wlc->pub->unit,
hps);
v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
v2 = MCTL_WAKE;
if (hps)
v2 |= MCTL_HPS;
brcms_b_mctrl(wlc->hw, MCTL_WAKE | MCTL_HPS, v2);
awake_before = ((v1 & MCTL_WAKE) || ((v1 & MCTL_HPS) == 0));
if (!awake_before)
brcms_b_wait_for_wake(wlc->hw);
}
/*
* Write this BSS config's MAC address to core.
* Updates RXE match engine.
*/
static int brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
{
int err = 0;
struct brcms_c_info *wlc = bsscfg->wlc;
/* enter the MAC addr into the RXE match registers */
brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, wlc->pub->cur_etheraddr);
brcms_c_ampdu_macaddr_upd(wlc);
return err;
}
/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).
* Updates RXE match engine.
*/
static void brcms_c_set_bssid(struct brcms_bss_cfg *bsscfg)
{
/* we need to update BSSID in RXE match registers */
brcms_c_set_addrmatch(bsscfg->wlc, RCM_BSSID_OFFSET, bsscfg->BSSID);
}
void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len)
{
u8 len = min_t(u8, sizeof(wlc->bsscfg->SSID), ssid_len);
memset(wlc->bsscfg->SSID, 0, sizeof(wlc->bsscfg->SSID));
memcpy(wlc->bsscfg->SSID, ssid, len);
wlc->bsscfg->SSID_len = len;
}
static void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
{
wlc_hw->shortslot = shortslot;
if (wlc_hw->band->bandtype == BRCM_BAND_2G && wlc_hw->up) {
brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
brcms_b_update_slot_timing(wlc_hw, shortslot);
brcms_c_enable_mac(wlc_hw->wlc);
}
}
/*
* Suspend the MAC and update the slot timing
* for standard 11b/g (20us slots) or shortslot 11g (9us slots).
*/
static void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot)
{
/* use the override if it is set */
if (wlc->shortslot_override != BRCMS_SHORTSLOT_AUTO)
shortslot = (wlc->shortslot_override == BRCMS_SHORTSLOT_ON);
if (wlc->shortslot == shortslot)
return;
wlc->shortslot = shortslot;
brcms_b_set_shortslot(wlc->hw, shortslot);
}
static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
{
if (wlc->home_chanspec != chanspec) {
wlc->home_chanspec = chanspec;
if (wlc->pub->associated)
wlc->bsscfg->current_bss->chanspec = chanspec;
}
}
void
brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
bool mute_tx, struct txpwr_limits *txpwr)
{
uint bandunit;
brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: 0x%x\n", wlc_hw->unit,
chanspec);
wlc_hw->chanspec = chanspec;
/* Switch bands if necessary */
if (wlc_hw->_nbands > 1) {
bandunit = chspec_bandunit(chanspec);
if (wlc_hw->band->bandunit != bandunit) {
/* brcms_b_setband disables other bandunit,
* use light band switch if not up yet
*/
if (wlc_hw->up) {
wlc_phy_chanspec_radio_set(wlc_hw->
bandstate[bandunit]->
pi, chanspec);
brcms_b_setband(wlc_hw, bandunit, chanspec);
} else {
brcms_c_setxband(wlc_hw, bandunit);
}
}
}
wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx);
if (!wlc_hw->up) {
if (wlc_hw->clk)
wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr,
chanspec);
wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);
} else {
wlc_phy_chanspec_set(wlc_hw->band->pi, chanspec);
wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
/* Update muting of the channel */
brcms_b_mute(wlc_hw, mute_tx);
}
}
/* switch to and initialize new band */
static void brcms_c_setband(struct brcms_c_info *wlc,
uint bandunit)
{
wlc->band = wlc->bandstate[bandunit];
if (!wlc->pub->up)
return;
/* wait for at least one beacon before entering sleeping state */
brcms_c_set_ps_ctrl(wlc);
/* band-specific initializations */
brcms_c_bsinit(wlc);
}
static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
{
uint bandunit;
bool switchband = false;
u16 old_chanspec = wlc->chanspec;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
brcms_err(wlc->hw->d11core, "wl%d: %s: Bad channel %d\n",
wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
return;
}
/* Switch bands if necessary */
if (wlc->pub->_nbands > 1) {
bandunit = chspec_bandunit(chanspec);
if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
switchband = true;
if (wlc->bandlocked) {
brcms_err(wlc->hw->d11core,
"wl%d: %s: chspec %d band is locked!\n",
wlc->pub->unit, __func__,
CHSPEC_CHANNEL(chanspec));
return;
}
/*
* should the setband call come after the
* brcms_b_chanspec() ? if the setband updates
* (brcms_c_bsinit) use low level calls to inspect and
* set state, the state inspected may be from the wrong
* band, or the following brcms_b_set_chanspec() may
* undo the work.
*/
brcms_c_setband(wlc, bandunit);
}
}
/* sync up phy/radio chanspec */
brcms_c_set_phy_chanspec(wlc, chanspec);
/* init antenna selection */
if (brcms_chspec_bw(old_chanspec) != brcms_chspec_bw(chanspec)) {
brcms_c_antsel_init(wlc->asi);
/* Fix the hardware rateset based on bw.
* Mainly add MCS32 for 40 MHz, remove MCS32 for 20 MHz
*/
brcms_c_rateset_bw_mcs_filter(&wlc->band->hw_rateset,
wlc->band->mimo_cap_40 ? brcms_chspec_bw(chanspec) : 0);
}
/* update some mac configuration since chanspec changed */
brcms_c_ucode_mac_upd(wlc);
}
/*
* This function changes the phytxctl for beacon based on current
* beacon ratespec AND txant setting as per this table:
* ratespec CCK ant = wlc->stf->txant
* OFDM ant = 3
*/
void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
u32 bcn_rspec)
{
u16 phyctl;
u16 phytxant = wlc->stf->phytxant;
u16 mask = PHY_TXC_ANT_MASK;
/* for non-siso rates or default setting, use the available chains */
if (BRCMS_PHY_11N_CAP(wlc->band))
phytxant = brcms_c_stf_phytxchain_sel(wlc, bcn_rspec);
phyctl = brcms_b_read_shm(wlc->hw, M_BCN_PCTLWD);
phyctl = (phyctl & ~mask) | phytxant;
brcms_b_write_shm(wlc->hw, M_BCN_PCTLWD, phyctl);
}
/*
* Centralized protection config change function to simplify debugging;
* no consistency checking. This should be called only on changes to
* avoid overhead in periodic code.
*/
void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
{
/*
* Cannot use brcms_dbg_* here because this function is called
* before wlc is sufficiently initialized.
*/
BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);
switch (idx) {
case BRCMS_PROT_G_SPEC:
wlc->protection->_g = (bool) val;
break;
case BRCMS_PROT_G_OVR:
wlc->protection->g_override = (s8) val;
break;
case BRCMS_PROT_G_USER:
wlc->protection->gmode_user = (u8) val;
break;
case BRCMS_PROT_OVERLAP:
wlc->protection->overlap = (s8) val;
break;
case BRCMS_PROT_N_USER:
wlc->protection->nmode_user = (s8) val;
break;
case BRCMS_PROT_N_CFG:
wlc->protection->n_cfg = (s8) val;
break;
case BRCMS_PROT_N_CFG_OVR:
wlc->protection->n_cfg_override = (s8) val;
break;
case BRCMS_PROT_N_NONGF:
wlc->protection->nongf = (bool) val;
break;
case BRCMS_PROT_N_NONGF_OVR:
wlc->protection->nongf_override = (s8) val;
break;
case BRCMS_PROT_N_PAM_OVR:
wlc->protection->n_pam_override = (s8) val;
break;
case BRCMS_PROT_N_OBSS:
wlc->protection->n_obss = (bool) val;
break;
default:
break;
}
}
static void brcms_c_ht_update_sgi_rx(struct brcms_c_info *wlc, int val)
{
if (wlc->pub->up) {
brcms_c_update_beacon(wlc);
brcms_c_update_probe_resp(wlc, true);
}
}
static void brcms_c_ht_update_ldpc(struct brcms_c_info *wlc, s8 val)
{
wlc->stf->ldpc = val;
if (wlc->pub->up) {
brcms_c_update_beacon(wlc);
brcms_c_update_probe_resp(wlc, true);
wlc_phy_ldpc_override_set(wlc->band->pi, (val ? true : false));
}
}
void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
const struct ieee80211_tx_queue_params *params,
bool suspend)
{
int i;
struct shm_acparams acp_shm;
u16 *shm_entry;
/* Only apply params if the core is out of reset and has clocks */
if (!wlc->clk) {
brcms_err(wlc->hw->d11core, "wl%d: %s : no-clock\n",
wlc->pub->unit, __func__);
return;
}
memset(&acp_shm, 0, sizeof(struct shm_acparams));
/* fill in shm ac params struct */
acp_shm.txop = params->txop;
/* convert from units of 32us to us for ucode */
wlc->edcf_txop[aci & 0x3] = acp_shm.txop =
EDCF_TXOP2USEC(acp_shm.txop);
acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
&& acp_shm.aifs < EDCF_AIFSN_MAX)
acp_shm.aifs++;
if (acp_shm.aifs < EDCF_AIFSN_MIN
|| acp_shm.aifs > EDCF_AIFSN_MAX) {
brcms_err(wlc->hw->d11core, "wl%d: edcf_setparams: bad "
"aifs %d\n", wlc->pub->unit, acp_shm.aifs);
} else {
acp_shm.cwmin = params->cw_min;
acp_shm.cwmax = params->cw_max;
acp_shm.cwcur = acp_shm.cwmin;
acp_shm.bslots =
bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
acp_shm.cwcur;
acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
/* Indicate the new params to the ucode */
acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
wme_ac2fifo[aci] *
M_EDCF_QLEN +
M_EDCF_STATUS_OFF));
acp_shm.status |= WME_STATUS_NEWAC;
/* Fill in shm acparam table */
shm_entry = (u16 *) &acp_shm;
for (i = 0; i < (int)sizeof(struct shm_acparams); i += 2)
brcms_b_write_shm(wlc->hw,
M_EDCF_QINFO +
wme_ac2fifo[aci] * M_EDCF_QLEN + i,
*shm_entry++);
}
if (suspend)
brcms_c_suspend_mac_and_wait(wlc);
brcms_c_update_beacon(wlc);
brcms_c_update_probe_resp(wlc, false);
if (suspend)
brcms_c_enable_mac(wlc);
}
static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
{
u16 aci;
int i_ac;
struct ieee80211_tx_queue_params txq_pars;
static const struct edcf_acparam default_edcf_acparams[] = {
{EDCF_AC_BE_ACI_STA, EDCF_AC_BE_ECW_STA, EDCF_AC_BE_TXOP_STA},
{EDCF_AC_BK_ACI_STA, EDCF_AC_BK_ECW_STA, EDCF_AC_BK_TXOP_STA},
{EDCF_AC_VI_ACI_STA, EDCF_AC_VI_ECW_STA, EDCF_AC_VI_TXOP_STA},
{EDCF_AC_VO_ACI_STA, EDCF_AC_VO_ECW_STA, EDCF_AC_VO_TXOP_STA}
}; /* ucode needs these parameters during its initialization */
const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
/* find out which ac this set of params applies to */
aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
/* fill in shm ac params struct */
txq_pars.txop = edcf_acp->TXOP;
txq_pars.aifs = edcf_acp->ACI;
/* CWmin = 2^(ECWmin) - 1 */
txq_pars.cw_min = EDCF_ECW2CW(edcf_acp->ECW & EDCF_ECWMIN_MASK);
/* CWmax = 2^(ECWmax) - 1 */
txq_pars.cw_max = EDCF_ECW2CW((edcf_acp->ECW & EDCF_ECWMAX_MASK)
>> EDCF_ECWMAX_SHIFT);
brcms_c_wme_setparams(wlc, aci, &txq_pars, suspend);
}
if (suspend) {
brcms_c_suspend_mac_and_wait(wlc);
brcms_c_enable_mac(wlc);
}
}
static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
{
/* nothing to do if the radio monitor timer is already running */
if (wlc->radio_monitor)
return;
wlc->radio_monitor = true;
brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_RADIO_MON);
brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
}
static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
{
if (!wlc->radio_monitor)
return true;
wlc->radio_monitor = false;
brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_RADIO_MON);
return brcms_del_timer(wlc->radio_timer);
}
/* read hwdisable state and propagate to wlc flag */
static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
{
if (wlc->pub->hw_off)
return;
if (brcms_b_radio_read_hwdisabled(wlc->hw))
mboolset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
else
mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
}
/* update hwradio status and return it */
bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
{
brcms_c_radio_hwdisable_upd(wlc);
return mboolisset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE) ?
true : false;
}
/* periodically query the hw radio button while the driver is "down" */
static void brcms_c_radio_timer(void *arg)
{
struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
if (brcms_deviceremoved(wlc)) {
brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
wlc->pub->unit, __func__);
brcms_down(wlc->wl);
return;
}
brcms_c_radio_hwdisable_upd(wlc);
}
/* common low-level watchdog code */
static void brcms_b_watchdog(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
if (!wlc_hw->up)
return;
/* increment second count */
wlc_hw->now++;
/* Check for FIFO error interrupts */
brcms_b_fifoerrors(wlc_hw);
/* make sure RX dma has buffers */
dma_rxfill(wlc->hw->di[RX_FIFO]);
wlc_phy_watchdog(wlc_hw->band->pi);
}
/* common watchdog code */
static void brcms_c_watchdog(struct brcms_c_info *wlc)
{
brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
if (!wlc->pub->up)
return;
if (brcms_deviceremoved(wlc)) {
brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
wlc->pub->unit, __func__);
brcms_down(wlc->wl);
return;
}
/* increment second count */
wlc->pub->now++;
brcms_c_radio_hwdisable_upd(wlc);
/* if the radio is disabled, the driver may be down; quit here */
if (wlc->pub->radio_disabled)
return;
brcms_b_watchdog(wlc);
/*
* occasionally sample mac stat counters to
* detect 16-bit counter wrap
*/
if ((wlc->pub->now % SW_TIMER_MAC_STAT_UPD) == 0)
brcms_c_statsupd(wlc);
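/* on N-PHY radios, run a temperature-sense update once every
* BRCMS_TEMPSENSE_PERIOD seconds
*/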
if (BRCMS_ISNPHY(wlc->band) &&
((wlc->pub->now - wlc->tempsense_lasttime) >=
BRCMS_TEMPSENSE_PERIOD)) {
wlc->tempsense_lasttime = wlc->pub->now;
brcms_c_tempsense_upd(wlc);
}
}
static void brcms_c_watchdog_by_timer(void *arg)
{
struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
brcms_c_watchdog(wlc);
}
static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
{
wlc->wdtimer = brcms_init_timer(wlc->wl, brcms_c_watchdog_by_timer,
wlc, "watchdog");
if (!wlc->wdtimer) {
wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for wdtimer "
"failed\n", unit);
goto fail;
}
wlc->radio_timer = brcms_init_timer(wlc->wl, brcms_c_radio_timer,
wlc, "radio");
if (!wlc->radio_timer) {
wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for radio_timer "
"failed\n", unit);
goto fail;
}
return true;
fail:
return false;
}
/*
* Initialize brcms_c_info default values ...
* may be overridden later in this function
*/
static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
{
int i;
/* Save our copy of the chanspec */
wlc->chanspec = ch20mhz_chspec(1);
/* various 802.11g modes */
wlc->shortslot = false;
wlc->shortslot_override = BRCMS_SHORTSLOT_AUTO;
brcms_c_protection_upd(wlc, BRCMS_PROT_G_OVR, BRCMS_PROTECTION_AUTO);
brcms_c_protection_upd(wlc, BRCMS_PROT_G_SPEC, false);
brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG_OVR,
BRCMS_PROTECTION_AUTO);
brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG, BRCMS_N_PROTECTION_OFF);
brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF_OVR,
BRCMS_PROTECTION_AUTO);
brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF, false);
brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, AUTO);
brcms_c_protection_upd(wlc, BRCMS_PROT_OVERLAP,
BRCMS_PROTECTION_CTL_OVERLAP);
/* 802.11g draft 4.0 NonERP elt advertisement */
wlc->include_legacy_erp = true;
wlc->stf->ant_rx_ovr = ANT_RX_DIV_DEF;
wlc->stf->txant = ANT_TX_DEF;
wlc->prb_resp_timeout = BRCMS_PRB_RESP_TIMEOUT;
wlc->usr_fragthresh = DOT11_DEFAULT_FRAG_LEN;
for (i = 0; i < NFIFO; i++)
wlc->fragthresh[i] = DOT11_DEFAULT_FRAG_LEN;
wlc->RTSThresh = DOT11_DEFAULT_RTS_LEN;
/* default rate fallback retry limits */
wlc->SFBL = RETRY_SHORT_FB;
wlc->LFBL = RETRY_LONG_FB;
/* default mac retry limits */
wlc->SRL = RETRY_SHORT_DEF;
wlc->LRL = RETRY_LONG_DEF;
/* host-based AMPDU aggregation is enabled by default */
wlc->pub->_ampdu = AMPDU_AGG_HOST;
}
static uint brcms_c_attach_module(struct brcms_c_info *wlc)
{
uint err = 0;
uint unit;
unit = wlc->pub->unit;
wlc->asi = brcms_c_antsel_attach(wlc);
if (wlc->asi == NULL) {
wiphy_err(wlc->wiphy, "wl%d: attach: antsel_attach "
"failed\n", unit);
err = 44;
goto fail;
}
wlc->ampdu = brcms_c_ampdu_attach(wlc);
if (wlc->ampdu == NULL) {
wiphy_err(wlc->wiphy, "wl%d: attach: ampdu_attach "
"failed\n", unit);
err = 50;
goto fail;
}
if (brcms_c_stf_attach(wlc) != 0) {
wiphy_err(wlc->wiphy, "wl%d: attach: stf_attach "
"failed\n", unit);
err = 68;
goto fail;
}
fail:
return err;
}
struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
{
return wlc->pub;
}
/* low level attach
* run backplane attach, init nvram
* run phy attach
* initialize software state for each core and band
* put the whole chip in reset (driver down state), no clock
*/
static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
uint unit, bool piomode)
{
struct brcms_hardware *wlc_hw;
uint err = 0;
uint j;
bool wme = false;
struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
struct pci_dev *pcidev = core->bus->host_pci;
struct ssb_sprom *sprom = &core->bus->sprom;
if (core->bus->hosttype == BCMA_HOSTTYPE_PCI)
brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
pcidev->vendor,
pcidev->device);
else
brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
core->bus->boardinfo.vendor,
core->bus->boardinfo.type);
wme = true;
wlc_hw = wlc->hw;
wlc_hw->wlc = wlc;
wlc_hw->unit = unit;
wlc_hw->band = wlc_hw->bandstate[0];
wlc_hw->_piomode = piomode;
/* populate struct brcms_hardware with default values */
brcms_b_info_init(wlc_hw);
/*
* Do the hardware portion of the attach. Also initialize software
* state that depends on the particular hardware we are running.
*/
wlc_hw->sih = ai_attach(core->bus);
if (wlc_hw->sih == NULL) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
unit);
err = 11;
goto fail;
}
/* verify again the device is supported */
if (!brcms_c_chipmatch(core)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported device\n",
unit);
err = 12;
goto fail;
}
if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
wlc_hw->vendorid = pcidev->vendor;
wlc_hw->deviceid = pcidev->device;
} else {
wlc_hw->vendorid = core->bus->boardinfo.vendor;
wlc_hw->deviceid = core->bus->boardinfo.type;
}
wlc_hw->d11core = core;
wlc_hw->corerev = core->id.rev;
/* validate chip, chiprev and corerev */
if (!brcms_c_isgoodchip(wlc_hw)) {
err = 13;
goto fail;
}
/* initialize power control registers */
ai_clkctl_init(wlc_hw->sih);
/* request fastclock and force fastclock for the rest of attach;
* bring the d11 core out of reset.
* For PMU chips, the first wlc_clkctl_clk is a no-op since core-clk
* is still false, but it will be called again inside wlc_corereset,
* after d11 is out of reset.
*/
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
if (!brcms_b_validate_chip_access(wlc_hw)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: validate_chip_access "
"failed\n", unit);
err = 14;
goto fail;
}
/* get the board rev, used just below */
j = sprom->board_rev;
/* promote srom boardrev of 0xFF to 1 */
if (j == BOARDREV_PROMOTABLE)
j = BOARDREV_PROMOTED;
wlc_hw->boardrev = (u16) j;
if (!brcms_c_validboardtype(wlc_hw)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
"board type (0x%x)" " or revision level (0x%x)\n",
unit, ai_get_boardtype(wlc_hw->sih),
wlc_hw->boardrev);
err = 15;
goto fail;
}
wlc_hw->sromrev = sprom->revision;
wlc_hw->boardflags = sprom->boardflags_lo + (sprom->boardflags_hi << 16);
wlc_hw->boardflags2 = sprom->boardflags2_lo + (sprom->boardflags2_hi << 16);
if (wlc_hw->boardflags & BFL_NOPLLDOWN)
brcms_b_pllreq(wlc_hw, true, BRCMS_PLLREQ_SHARED);
/* check device id(srom, nvram etc.) to set bands */
if (wlc_hw->deviceid == BCM43224_D11N_ID ||
wlc_hw->deviceid == BCM43224_D11N_ID_VEN1 ||
wlc_hw->deviceid == BCM43224_CHIP_ID)
/* Dualband boards */
wlc_hw->_nbands = 2;
else
wlc_hw->_nbands = 1;
if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
* unconditionally does the init of these values
*/
wlc->vendorid = wlc_hw->vendorid;
wlc->deviceid = wlc_hw->deviceid;
wlc->pub->sih = wlc_hw->sih;
wlc->pub->corerev = wlc_hw->corerev;
wlc->pub->sromrev = wlc_hw->sromrev;
wlc->pub->boardrev = wlc_hw->boardrev;
wlc->pub->boardflags = wlc_hw->boardflags;
wlc->pub->boardflags2 = wlc_hw->boardflags2;
wlc->pub->_nbands = wlc_hw->_nbands;
wlc_hw->physhim = wlc_phy_shim_attach(wlc_hw, wlc->wl, wlc);
if (wlc_hw->physhim == NULL) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_shim_attach "
"failed\n", unit);
err = 25;
goto fail;
}
/* pass all the parameters to wlc_phy_shared_attach in one struct */
sha_params.sih = wlc_hw->sih;
sha_params.physhim = wlc_hw->physhim;
sha_params.unit = unit;
sha_params.corerev = wlc_hw->corerev;
sha_params.vid = wlc_hw->vendorid;
sha_params.did = wlc_hw->deviceid;
sha_params.chip = ai_get_chip_id(wlc_hw->sih);
sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
sha_params.sromrev = wlc_hw->sromrev;
sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
sha_params.boardrev = wlc_hw->boardrev;
sha_params.boardflags = wlc_hw->boardflags;
sha_params.boardflags2 = wlc_hw->boardflags2;
/* alloc and save pointer to shared phy state area */
wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
if (!wlc_hw->phy_sh) {
err = 16;
goto fail;
}
/* initialize software state for each core and band */
for (j = 0; j < wlc_hw->_nbands; j++) {
/*
* band0 is always 2.4Ghz
* band1, if present, is 5Ghz
*/
brcms_c_setxband(wlc_hw, j);
wlc_hw->band->bandunit = j;
wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->band->bandunit = j;
wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->core->coreidx = core->core_index;
wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
WARN_ON(wlc_hw->corerev < XMTFIFOTBL_STARTREV ||
(wlc_hw->corerev - XMTFIFOTBL_STARTREV) >=
ARRAY_SIZE(xmtfifo_sz));
wlc_hw->xmtfifo_sz =
xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];
WARN_ON(!wlc_hw->xmtfifo_sz[0]);
/* Get a phy for this band */
wlc_hw->band->pi =
wlc_phy_attach(wlc_hw->phy_sh, core,
wlc_hw->band->bandtype,
wlc->wiphy);
if (wlc_hw->band->pi == NULL) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_"
"attach failed\n", unit);
err = 17;
goto fail;
}
wlc_phy_machwcap_set(wlc_hw->band->pi, wlc_hw->machwcap);
wlc_phy_get_phyversion(wlc_hw->band->pi, &wlc_hw->band->phytype,
&wlc_hw->band->phyrev,
&wlc_hw->band->radioid,
&wlc_hw->band->radiorev);
wlc_hw->band->abgphy_encore =
wlc_phy_get_encore(wlc_hw->band->pi);
wlc->band->abgphy_encore = wlc_phy_get_encore(wlc_hw->band->pi);
wlc_hw->band->core_flags =
wlc_phy_get_coreflags(wlc_hw->band->pi);
/* verify good phy_type & supported phy revision */
if (BRCMS_ISNPHY(wlc_hw->band)) {
if (NCONF_HAS(wlc_hw->band->phyrev))
goto good_phy;
else
goto bad_phy;
} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (LCNCONF_HAS(wlc_hw->band->phyrev))
goto good_phy;
else
goto bad_phy;
} else {
bad_phy:
wiphy_err(wiphy, "wl%d: brcms_b_attach: unsupported "
"phy type/rev (%d/%d)\n", unit,
wlc_hw->band->phytype, wlc_hw->band->phyrev);
err = 18;
goto fail;
}
good_phy:
/*
* BMAC_NOTE: wlc->band->pi should not be set below and should
* be done in the high level attach. However we can not make
* that change until all low level access is changed to
* wlc_hw->band->pi. Instead do the wlc->band->pi init below,
* keeping wlc_hw->band->pi as well for incremental update of
* low level fns, and cut over low only init when all fns
* updated.
*/
wlc->band->pi = wlc_hw->band->pi;
wlc->band->phytype = wlc_hw->band->phytype;
wlc->band->phyrev = wlc_hw->band->phyrev;
wlc->band->radioid = wlc_hw->band->radioid;
wlc->band->radiorev = wlc_hw->band->radiorev;
/* default contention windows size limits */
wlc_hw->band->CWmin = APHY_CWMIN;
wlc_hw->band->CWmax = PHY_CWMAX;
if (!brcms_b_attach_dmapio(wlc, j, wme)) {
err = 19;
goto fail;
}
}
/* disable core to match driver "down" state */
brcms_c_coredisable(wlc_hw);
/* Match driver "down" state */
ai_pci_down(wlc_hw->sih);
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
/* *******************************************************************
* The hardware is in the DOWN state at this point. D11 core
* or cores are in reset with clocks off, and the board PLLs
* are off if possible.
*
* Beyond this point, wlc->sbclk == false and chip registers
* should not be touched.
*********************************************************************
*/
/* init etheraddr state variables */
brcms_c_get_macaddr(wlc_hw, wlc_hw->etheraddr);
if (is_broadcast_ether_addr(wlc_hw->etheraddr) ||
is_zero_ether_addr(wlc_hw->etheraddr)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr\n",
unit);
err = 22;
goto fail;
}
brcms_dbg_info(wlc_hw->d11core, "deviceid 0x%x nbands %d board 0x%x\n",
wlc_hw->deviceid, wlc_hw->_nbands,
ai_get_boardtype(wlc_hw->sih));
return err;
fail:
wiphy_err(wiphy, "wl%d: brcms_b_attach: failed with err %d\n", unit,
err);
return err;
}
static void brcms_c_attach_antgain_init(struct brcms_c_info *wlc)
{
uint unit;
unit = wlc->pub->unit;
if ((wlc->band->antgain == -1) && (wlc->pub->sromrev == 1)) {
/* default antenna gain for srom rev 1 is 2 dBm (8 qdbm) */
wlc->band->antgain = 8;
} else if (wlc->band->antgain == -1) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
" srom, using 2dB\n", unit, __func__);
wlc->band->antgain = 8;
} else {
s8 gain, fract;
/* Older sroms specified gain in whole dbm only. In order
* to be able to specify qdbm granularity and remain backward
* compatible, the whole dbms are now encoded in only the
* low 6 bits and the remaining qdbms are encoded in the hi 2 bits.
* A 6 bit signed number ranges from -32 to 31.
*
* Examples:
* 0x1 = 1 db,
* 0xc1 = 1.75 db (1 + 3 quarters),
* 0x3f = -1 (-1 + 0 quarters),
* 0x7f = -.75 (-1 + 1 quarters) = -3 qdbm.
* 0xbf = -.50 (-1 + 2 quarters) = -2 qdbm.
*/
gain = wlc->band->antgain & 0x3f;
gain <<= 2; /* Sign extend */
gain >>= 2;
fract = (wlc->band->antgain & 0xc0) >> 6;
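/* store the result in quarter-dB units: whole dB * 4 plus the
* quarter-dB remainder
*/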
wlc->band->antgain = 4 * gain + fract;
}
}
static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
{
int aa;
uint unit;
int bandtype;
struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
unit = wlc->pub->unit;
bandtype = wlc->band->bandtype;
/* get antennas available */
if (bandtype == BRCM_BAND_5G)
aa = sprom->ant_available_a;
else
aa = sprom->ant_available_bg;
if ((aa < 1) || (aa > 15)) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
" srom (0x%x), using 3\n", unit, __func__, aa);
aa = 3;
}
/* reset the defaults if we have a single antenna */
if (aa == 1) {
wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_0;
wlc->stf->txant = ANT_TX_FORCE_0;
} else if (aa == 2) {
wlc->stf->ant_rx_ovr = ANT_RX_DIV_FORCE_1;
wlc->stf->txant = ANT_TX_FORCE_1;
}
/* Compute Antenna Gain */
if (bandtype == BRCM_BAND_5G)
wlc->band->antgain = sprom->antenna_gain.a1;
else
wlc->band->antgain = sprom->antenna_gain.a0;
brcms_c_attach_antgain_init(wlc);
return true;
}
static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
{
u16 chanspec;
struct brcms_band *band;
struct brcms_bss_info *bi = wlc->default_bss;
/* init default and target BSS with some sane initial values */
memset(bi, 0, sizeof(*bi));
bi->beacon_period = BEACON_INTERVAL_DEFAULT;
/* fill the default channel as the first valid channel
* starting from the 2G channels
*/
chanspec = ch20mhz_chspec(1);
wlc->home_chanspec = bi->chanspec = chanspec;
/* find the band of our default channel */
band = wlc->band;
if (wlc->pub->_nbands > 1 &&
band->bandunit != chspec_bandunit(chanspec))
band = wlc->bandstate[OTHERBANDUNIT(wlc)];
/* init bss rates to the band specific default rate set */
brcms_c_rateset_default(&bi->rateset, NULL, band->phytype,
band->bandtype, false, BRCMS_RATE_MASK_FULL,
(bool) (wlc->pub->_n_enab & SUPPORT_11N),
brcms_chspec_bw(chanspec), wlc->stf->txstreams);
if (wlc->pub->_n_enab & SUPPORT_11N)
bi->flags |= BRCMS_BSS_HT;
}
static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
{
uint i;
struct brcms_band *band;
for (i = 0; i < wlc->pub->_nbands; i++) {
band = wlc->bandstate[i];
if (band->bandtype == BRCM_BAND_5G) {
if ((bwcap == BRCMS_N_BW_40ALL)
|| (bwcap == BRCMS_N_BW_20IN2G_40IN5G))
band->mimo_cap_40 = true;
else
band->mimo_cap_40 = false;
} else {
if (bwcap == BRCMS_N_BW_40ALL)
band->mimo_cap_40 = true;
else
band->mimo_cap_40 = false;
}
}
}
static void brcms_c_timers_deinit(struct brcms_c_info *wlc)
{
/* free timer state */
if (wlc->wdtimer) {
brcms_free_timer(wlc->wdtimer);
wlc->wdtimer = NULL;
}
if (wlc->radio_timer) {
brcms_free_timer(wlc->radio_timer);
wlc->radio_timer = NULL;
}
}
static void brcms_c_detach_module(struct brcms_c_info *wlc)
{
if (wlc->asi) {
brcms_c_antsel_detach(wlc->asi);
wlc->asi = NULL;
}
if (wlc->ampdu) {
brcms_c_ampdu_detach(wlc->ampdu);
wlc->ampdu = NULL;
}
brcms_c_stf_detach(wlc);
}
/*
* low level detach
*/
static int brcms_b_detach(struct brcms_c_info *wlc)
{
uint i;
struct brcms_hw_band *band;
struct brcms_hardware *wlc_hw = wlc->hw;
int callbacks;
callbacks = 0;
brcms_b_detach_dmapio(wlc_hw);
band = wlc_hw->band;
for (i = 0; i < wlc_hw->_nbands; i++) {
if (band->pi) {
/* Detach this band's phy */
wlc_phy_detach(band->pi);
band->pi = NULL;
}
band = wlc_hw->bandstate[OTHERBANDUNIT(wlc)];
}
/* Free shared phy state */
kfree(wlc_hw->phy_sh);
wlc_phy_shim_detach(wlc_hw->physhim);
if (wlc_hw->sih) {
ai_detach(wlc_hw->sih);
wlc_hw->sih = NULL;
}
return callbacks;
}
/*
* Return a count of the number of driver callbacks still pending.
*
* General policy is that brcms_c_detach can only dealloc/free software states.
* It can NOT touch hardware registers since the d11core may be in reset and
* clock may not be available.
* One exception is sb register access, which is possible if the crystal is
* turned on after the "down" state; the driver should avoid software timers,
* with the exception of radio_monitor.
*/
uint brcms_c_detach(struct brcms_c_info *wlc)
{
uint callbacks = 0;
if (wlc == NULL)
return 0;
callbacks += brcms_b_detach(wlc);
/* delete software timers */
if (!brcms_c_radio_monitor_stop(wlc))
callbacks++;
brcms_c_channel_mgr_detach(wlc->cmi);
brcms_c_timers_deinit(wlc);
brcms_c_detach_module(wlc);
brcms_c_detach_mfree(wlc);
return callbacks;
}
/* update state that depends on the current value of "ap" */
static void brcms_c_ap_upd(struct brcms_c_info *wlc)
{
/* STA-BSS; short capable */
wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
}
/* Initialize just the hardware when coming out of POR or S3/S5 system states */
static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
{
if (wlc_hw->wlc->pub->hw_up)
return;
brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
/*
* Enable pll and xtal, initialize the power control registers,
* and force fastclock for the remainder of brcms_c_up().
*/
brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/*
* TODO: test suspend/resume
*
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
/*
* Inform phy that a POR reset has occurred so
* it does a complete phy init
*/
wlc_phy_por_inform(wlc_hw->band->pi);
wlc_hw->ucode_loaded = false;
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
&& (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (!(wlc_hw->boardrev >= 0x1250 &&
(wlc_hw->boardflags & BFL_FEM_BT)))
ai_epa_4313war(wlc_hw->sih);
}
}
static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
{
brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
/*
* Enable pll and xtal, initialize the power control registers,
* and force fastclock for the remainder of brcms_c_up().
*/
brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/*
* Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/
bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
true);
/*
* Need to read the hwradio status here to cover the case where the
* system is loaded with the hw radio disabled. We do not want to
* bring the driver up in this case.
*/
if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */
ai_pci_down(wlc_hw->sih);
brcms_b_xtal(wlc_hw, OFF);
return -ENOMEDIUM;
}
ai_pci_up(wlc_hw->sih);
/* reset the d11 core */
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
return 0;
}
static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
{
wlc_hw->up = true;
wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
/* FULLY enable dynamic power control and d11 core interrupt */
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
brcms_intrson(wlc_hw->wlc->wl);
return 0;
}
/*
* Write WME tunable parameters for retransmit/max rate
* from wlc struct to ucode
*/
static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
{
int ac;
/* Need clock to do this */
if (!wlc->clk)
return;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
wlc->wme_retries[ac]);
}
/* make interface operational */
int brcms_c_up(struct brcms_c_info *wlc)
{
struct ieee80211_channel *ch;
brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
/* HW is turned off so don't try to access it */
if (wlc->pub->hw_off || brcms_deviceremoved(wlc))
return -ENOMEDIUM;
if (!wlc->pub->hw_up) {
brcms_b_hw_up(wlc->hw);
wlc->pub->hw_up = true;
}
if ((wlc->pub->boardflags & BFL_FEM)
&& (ai_get_chip_id(wlc->hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
MHF5_4313_GPIOCTRL, BRCM_BAND_ALL);
else
brcms_b_mhf(wlc->hw, MHF4, MHF4_EXTPA_ENABLE,
MHF4_EXTPA_ENABLE, BRCM_BAND_ALL);
}
/*
* Need to read the hwradio status here to cover the case where the
* system is loaded with the hw radio disabled. We do not want to bring
* the driver up in this case. If the radio is disabled, abort the up
* sequence, lower power, start the radio timer and return 0 (for NDIS);
* don't call radio_update, to avoid looping through brcms_c_up.
*
* brcms_b_up_prep() returns either 0 or -ENOMEDIUM only.
*/
if (!wlc->pub->radio_disabled) {
int status = brcms_b_up_prep(wlc->hw);
if (status == -ENOMEDIUM) {
if (!mboolisset
(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE)) {
struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
mboolset(wlc->pub->radio_disabled,
WL_RADIO_HW_DISABLE);
if (bsscfg->type == BRCMS_TYPE_STATION ||
bsscfg->type == BRCMS_TYPE_ADHOC)
brcms_err(wlc->hw->d11core,
"wl%d: up: rfdisable -> "
"bsscfg_disable()\n",
wlc->pub->unit);
}
}
}
if (wlc->pub->radio_disabled) {
brcms_c_radio_monitor_start(wlc);
return 0;
}
/* brcms_b_up_prep() has done brcms_b_corereset(), so the clock is on; record it */
wlc->clk = true;
brcms_c_radio_monitor_stop(wlc);
/* Set EDCF hostflags */
brcms_b_mhf(wlc->hw, MHF1, MHF1_EDCF, MHF1_EDCF, BRCM_BAND_ALL);
brcms_init(wlc->wl);
wlc->pub->up = true;
if (wlc->bandinit_pending) {
ch = wlc->pub->ieee_hw->conf.chandef.chan;
brcms_c_suspend_mac_and_wait(wlc);
brcms_c_set_chanspec(wlc, ch20mhz_chspec(ch->hw_value));
wlc->bandinit_pending = false;
brcms_c_enable_mac(wlc);
}
brcms_b_up_finish(wlc->hw);
/* Program the TX wme params with the current settings */
brcms_c_wme_retries_write(wlc);
/* start one second watchdog timer */
brcms_add_timer(wlc->wdtimer, TIMER_INTERVAL_WATCHDOG, true);
wlc->WDarmed = true;
/* ensure antenna config is up to date */
brcms_c_stf_phy_txant_upd(wlc);
/* ensure LDPC config is in sync */
brcms_c_ht_update_ldpc(wlc, wlc->stf->ldpc);
return 0;
}
static uint brcms_c_down_del_timer(struct brcms_c_info *wlc)
{
uint callbacks = 0;
return callbacks;
}
static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
{
bool dev_gone;
uint callbacks = 0;
if (!wlc_hw->up)
return callbacks;
dev_gone = brcms_deviceremoved(wlc_hw->wlc);
/* disable interrupts */
if (dev_gone)
wlc_hw->wlc->macintmask = 0;
else {
/* now disable interrupts */
brcms_intrsoff(wlc_hw->wlc->wl);
/* ensure we're running on the pll clock again */
brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
}
/* down the phy as the last step of this stage */
callbacks += wlc_phy_down(wlc_hw->band->pi);
return callbacks;
}
static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
{
uint callbacks = 0;
bool dev_gone;
if (!wlc_hw->up)
return callbacks;
wlc_hw->up = false;
wlc_phy_hw_state_upd(wlc_hw->band->pi, false);
dev_gone = brcms_deviceremoved(wlc_hw->wlc);
if (dev_gone) {
wlc_hw->sbclk = false;
wlc_hw->clk = false;
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
/* reclaim any posted packets */
brcms_c_flushqueues(wlc_hw->wlc);
} else {
/* Reset and disable the core */
if (bcma_core_is_enabled(wlc_hw->d11core)) {
if (bcma_read32(wlc_hw->d11core,
D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
callbacks += brcms_reset(wlc_hw->wlc->wl);
brcms_c_coredisable(wlc_hw);
}
/* turn off primary xtal and pll */
if (!wlc_hw->noreset) {
ai_pci_down(wlc_hw->sih);
brcms_b_xtal(wlc_hw, OFF);
}
}
return callbacks;
}
/*
* Mark the interface nonoperational, stop the software mechanisms,
* disable the hardware, free any transient buffer state.
* Return a count of the number of driver callbacks still pending.
*/
uint brcms_c_down(struct brcms_c_info *wlc)
{
uint callbacks = 0;
int i;
bool dev_gone = false;
brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
/* check if we are already in the going down path */
if (wlc->going_down) {
brcms_err(wlc->hw->d11core,
"wl%d: %s: Driver going down so return\n",
wlc->pub->unit, __func__);
return 0;
}
if (!wlc->pub->up)
return callbacks;
wlc->going_down = true;
callbacks += brcms_b_bmac_down_prep(wlc->hw);
dev_gone = brcms_deviceremoved(wlc);
/* Call any registered down handlers */
for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (wlc->modulecb[i].down_fn)
callbacks +=
wlc->modulecb[i].down_fn(wlc->modulecb[i].hdl);
}
/* cancel the watchdog timer */
if (wlc->WDarmed) {
if (!brcms_del_timer(wlc->wdtimer))
callbacks++;
wlc->WDarmed = false;
}
/* cancel all other timers */
callbacks += brcms_c_down_del_timer(wlc);
wlc->pub->up = false;
wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL);
callbacks += brcms_b_down_finish(wlc->hw);
/* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */
wlc->clk = false;
wlc->going_down = false;
return callbacks;
}
/* Set the current gmode configuration */
int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
{
int ret = 0;
uint i;
struct brcms_c_rateset rs;
/* Default to 54g Auto */
/* Advertise and use shortslot (-1/0/1 Auto/Off/On) */
s8 shortslot = BRCMS_SHORTSLOT_AUTO;
bool shortslot_restrict = false; /* Restrict association to stations
* that support shortslot
*/
bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */
/* Advertise and use short preambles (-1/0/1 Auto/Off/On) */
int preamble = BRCMS_PLCP_LONG;
bool preamble_restrict = false; /* Restrict association to stations
* that support short preambles
*/
struct brcms_band *band;
/* if N-support is enabled, allow Gmode set as long as requested
* Gmode is not GMODE_LEGACY_B
*/
if ((wlc->pub->_n_enab & SUPPORT_11N) && gmode == GMODE_LEGACY_B)
return -ENOTSUPP;
/* verify that we are dealing with 2G band and grab the band pointer */
if (wlc->band->bandtype == BRCM_BAND_2G)
band = wlc->band;
else if ((wlc->pub->_nbands > 1) &&
(wlc->bandstate[OTHERBANDUNIT(wlc)]->bandtype == BRCM_BAND_2G))
band = wlc->bandstate[OTHERBANDUNIT(wlc)];
else
return -EINVAL;
/* update configuration value */
if (config)
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
/* Clear rateset override */
memset(&rs, 0, sizeof(rs));
switch (gmode) {
case GMODE_LEGACY_B:
shortslot = BRCMS_SHORTSLOT_OFF;
brcms_c_rateset_copy(&gphy_legacy_rates, &rs);
break;
case GMODE_LRS:
break;
case GMODE_AUTO:
/* Accept defaults */
break;
case GMODE_ONLY:
ofdm_basic = true;
preamble = BRCMS_PLCP_SHORT;
preamble_restrict = true;
break;
case GMODE_PERFORMANCE:
shortslot = BRCMS_SHORTSLOT_ON;
shortslot_restrict = true;
ofdm_basic = true;
preamble = BRCMS_PLCP_SHORT;
preamble_restrict = true;
break;
default:
/* Error */
brcms_err(wlc->hw->d11core, "wl%d: %s: invalid gmode %d\n",
wlc->pub->unit, __func__, gmode);
return -ENOTSUPP;
}
band->gmode = gmode;
wlc->shortslot_override = shortslot;
/* Use the default 11g rateset */
if (!rs.count)
brcms_c_rateset_copy(&cck_ofdm_rates, &rs);
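/* mark 6, 12 and 24 Mbps as basic rates when an OFDM basic rateset
* was requested
*/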
if (ofdm_basic) {
for (i = 0; i < rs.count; i++) {
if (rs.rates[i] == BRCM_RATE_6M
|| rs.rates[i] == BRCM_RATE_12M
|| rs.rates[i] == BRCM_RATE_24M)
rs.rates[i] |= BRCMS_RATE_FLAG;
}
}
/* Set default bss rateset */
wlc->default_bss->rateset.count = rs.count;
memcpy(wlc->default_bss->rateset.rates, rs.rates,
sizeof(wlc->default_bss->rateset.rates));
return ret;
}
int brcms_c_set_nmode(struct brcms_c_info *wlc)
{
uint i;
s32 nmode = AUTO;
if (wlc->stf->txstreams == WL_11N_3x3)
nmode = WL_11N_3x3;
else
nmode = WL_11N_2x2;
/* force GMODE_AUTO if NMODE is ON */
brcms_c_set_gmode(wlc, GMODE_AUTO, true);
if (nmode == WL_11N_3x3)
wlc->pub->_n_enab = SUPPORT_HT;
else
wlc->pub->_n_enab = SUPPORT_11N;
wlc->default_bss->flags |= BRCMS_BSS_HT;
/* add the mcs rates to the default and hw ratesets */
brcms_c_rateset_mcs_build(&wlc->default_bss->rateset,
wlc->stf->txstreams);
for (i = 0; i < wlc->pub->_nbands; i++)
memcpy(wlc->bandstate[i]->hw_rateset.mcs,
wlc->default_bss->rateset.mcs, MCSSET_LEN);
return 0;
}
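/*
* Validate the given rateset against the hardware rateset of the current
* band (and the other band on dual-band devices); if it passes, install it
* as the default BSS rateset and the band's default rateset.
*/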
static int
brcms_c_set_internal_rateset(struct brcms_c_info *wlc,
struct brcms_c_rateset *rs_arg)
{
struct brcms_c_rateset rs, new;
uint bandunit;
memcpy(&rs, rs_arg, sizeof(struct brcms_c_rateset));
/* check for bad count value */
if ((rs.count == 0) || (rs.count > BRCMS_NUMRATES))
return -EINVAL;
/* try the current band */
bandunit = wlc->band->bandunit;
memcpy(&new, &rs, sizeof(struct brcms_c_rateset));
if (brcms_c_rate_hwrs_filter_sort_validate
(&new, &wlc->bandstate[bandunit]->hw_rateset, true,
wlc->stf->txstreams))
goto good;
/* try the other band */
if (brcms_is_mband_unlocked(wlc)) {
bandunit = OTHERBANDUNIT(wlc);
memcpy(&new, &rs, sizeof(struct brcms_c_rateset));
if (brcms_c_rate_hwrs_filter_sort_validate(&new,
&wlc->
bandstate[bandunit]->
hw_rateset, true,
wlc->stf->txstreams))
goto good;
}
return -EBADE;
good:
/* apply new rateset */
memcpy(&wlc->default_bss->rateset, &new,
sizeof(struct brcms_c_rateset));
memcpy(&wlc->bandstate[bandunit]->defrateset, &new,
sizeof(struct brcms_c_rateset));
return 0;
}
static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc)
{
u8 r;
bool war = false;
if (wlc->pub->associated)
r = wlc->bsscfg->current_bss->rateset.rates[0];
else
r = wlc->default_bss->rateset.rates[0];
wlc_phy_ofdm_rateset_war(wlc->band->pi, war);
}
int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
{
u16 chspec = ch20mhz_chspec(channel);
if (channel > MAXCHANNEL)
return -EINVAL;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec))
return -EINVAL;
if (!wlc->pub->up && brcms_is_mband_unlocked(wlc)) {
if (wlc->band->bandunit != chspec_bandunit(chspec))
wlc->bandinit_pending = true;
else
wlc->bandinit_pending = false;
}
wlc->default_bss->chanspec = chspec;
/* brcms_c_BSSinit() will sanitize the rateset before
* using it.. */
if (wlc->pub->up && (wlc_phy_chanspec_get(wlc->band->pi) != chspec)) {
brcms_c_set_home_chanspec(wlc, chspec);
brcms_c_suspend_mac_and_wait(wlc);
brcms_c_set_chanspec(wlc, chspec);
brcms_c_enable_mac(wlc);
}
return 0;
}
int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl)
{
int ac;
if (srl < 1 || srl > RETRY_SHORT_MAX ||
lrl < 1 || lrl > RETRY_SHORT_MAX)
return -EINVAL;
wlc->SRL = srl;
wlc->LRL = lrl;
brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
EDCF_SHORT, wlc->SRL);
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
EDCF_LONG, wlc->LRL);
}
brcms_c_wme_retries_write(wlc);
return 0;
}
void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
struct brcm_rateset *currs)
{
struct brcms_c_rateset *rs;
if (wlc->pub->associated)
rs = &wlc->bsscfg->current_bss->rateset;
else
rs = &wlc->default_bss->rateset;
/* Copy only legacy rateset section */
currs->count = rs->count;
memcpy(&currs->rates, &rs->rates, rs->count);
}
int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
{
struct brcms_c_rateset internal_rs;
int bcmerror;
if (rs->count > BRCMS_NUMRATES)
return -ENOBUFS;
memset(&internal_rs, 0, sizeof(internal_rs));
/* Copy only legacy rateset section */
internal_rs.count = rs->count;
memcpy(&internal_rs.rates, &rs->rates, internal_rs.count);
/* merge rateset coming in with the current mcsset */
if (wlc->pub->_n_enab & SUPPORT_11N) {
struct brcms_bss_info *mcsset_bss;
if (wlc->pub->associated)
mcsset_bss = wlc->bsscfg->current_bss;
else
mcsset_bss = wlc->default_bss;
memcpy(internal_rs.mcs, &mcsset_bss->rateset.mcs[0],
MCSSET_LEN);
}
bcmerror = brcms_c_set_internal_rateset(wlc, &internal_rs);
if (!bcmerror)
brcms_c_ofdm_rateset_war(wlc);
return bcmerror;
}
static void brcms_c_time_lock(struct brcms_c_info *wlc)
{
bcma_set32(wlc->hw->d11core, D11REGOFFS(maccontrol), MCTL_TBTTHOLD);
/* Commit the write */
bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
}
static void brcms_c_time_unlock(struct brcms_c_info *wlc)
{
bcma_mask32(wlc->hw->d11core, D11REGOFFS(maccontrol), ~MCTL_TBTTHOLD);
/* Commit the write */
bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
}
int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period)
{
u32 bcnint_us;
if (period == 0)
return -EINVAL;
wlc->default_bss->beacon_period = period;
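/* the beacon period is given in TUs (1024 us); convert to microseconds */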
bcnint_us = period << 10;
brcms_c_time_lock(wlc);
bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_cfprep),
(bcnint_us << CFPREP_CBI_SHIFT));
bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_cfpstart), bcnint_us);
brcms_c_time_unlock(wlc);
return 0;
}
u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx)
{
return wlc->band->phytype;
}
void brcms_c_set_shortslot_override(struct brcms_c_info *wlc, s8 sslot_override)
{
wlc->shortslot_override = sslot_override;
/*
* shortslot is an 11g feature, so no more work if we are
* currently on the 5G band
*/
if (wlc->band->bandtype == BRCM_BAND_5G)
return;
if (wlc->pub->up && wlc->pub->associated) {
/* let watchdog or beacon processing update shortslot */
} else if (wlc->pub->up) {
/* unassociated shortslot is off */
brcms_c_switch_shortslot(wlc, false);
} else {
/* driver is down, so just update the brcms_c_info
* value */
if (wlc->shortslot_override == BRCMS_SHORTSLOT_AUTO)
wlc->shortslot = false;
else
wlc->shortslot =
(wlc->shortslot_override ==
BRCMS_SHORTSLOT_ON);
}
}
/*
* register watchdog and down handlers.
*/
int brcms_c_module_register(struct brcms_pub *pub,
const char *name, struct brcms_info *hdl,
int (*d_fn)(void *handle))
{
struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
int i;
/* find an empty entry and just add, no duplication check! */
for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (wlc->modulecb[i].name[0] == '\0') {
strncpy(wlc->modulecb[i].name, name,
sizeof(wlc->modulecb[i].name) - 1);
wlc->modulecb[i].hdl = hdl;
wlc->modulecb[i].down_fn = d_fn;
return 0;
}
}
return -ENOSR;
}
/* unregister module callbacks */
int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
struct brcms_info *hdl)
{
struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
int i;
if (wlc == NULL)
return -ENODATA;
for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (!strcmp(wlc->modulecb[i].name, name) &&
(wlc->modulecb[i].hdl == hdl)) {
memset(&wlc->modulecb[i], 0, sizeof(wlc->modulecb[i]));
return 0;
}
}
/* matching module not found! */
return -ENODATA;
}
static bool brcms_c_chipmatch_pci(struct bcma_device *core)
{
struct pci_dev *pcidev = core->bus->host_pci;
u16 vendor = pcidev->vendor;
u16 device = pcidev->device;
if (vendor != PCI_VENDOR_ID_BROADCOM) {
pr_err("unknown vendor id %04x\n", vendor);
return false;
}
if (device == BCM43224_D11N_ID_VEN1 || device == BCM43224_CHIP_ID)
return true;
if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
return true;
if (device == BCM4313_D11N2G_ID)
return true;
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
pr_err("unknown device id %04x\n", device);
return false;
}
static bool brcms_c_chipmatch_soc(struct bcma_device *core)
{
struct bcma_chipinfo *chipinfo = &core->bus->chipinfo;
if (chipinfo->id == BCMA_CHIP_ID_BCM4716)
return true;
pr_err("unknown chip id %04x\n", chipinfo->id);
return false;
}
bool brcms_c_chipmatch(struct bcma_device *core)
{
switch (core->bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
return brcms_c_chipmatch_pci(core);
case BCMA_HOSTTYPE_SOC:
return brcms_c_chipmatch_soc(core);
default:
pr_err("unknown host type: %i\n", core->bus->hosttype);
return false;
}
}
u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
{
u16 table_ptr;
u8 phy_rate, index;
/* get the phy specific rate encoding for the PLCP SIGNAL field */
if (is_ofdm_rate(rate))
table_ptr = M_RT_DIRMAP_A;
else
table_ptr = M_RT_DIRMAP_B;
/* for a given rate, the LS-nibble of the PLCP SIGNAL field is
* the index into the rate table.
*/
phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
index = phy_rate & 0xf;
/* Find the SHM pointer to the rate table entry by looking in the
* Direct-map Table
*/
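/* the direct-map entry is treated as a word offset; multiply by two to
* return a byte offset into SHM
*/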
return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2));
}
/*
* bcmc_fid_generate:
* Generate frame ID for a BCMC packet. The frag field is not used
* for MC frames so is used as part of the sequence number.
*/
static inline u16
bcmc_fid_generate(struct brcms_c_info *wlc, struct brcms_bss_cfg *bsscfg,
struct d11txh *txh)
{
u16 frameid;
frameid = le16_to_cpu(txh->TxFrameID) & ~(TXFID_SEQ_MASK |
TXFID_QUEUE_MASK);
frameid |=
(((wlc->
mc_fid_counter++) << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
TX_BCMC_FIFO;
return frameid;
}
static uint
brcms_c_calc_ack_time(struct brcms_c_info *wlc, u32 rspec,
u8 preamble_type)
{
uint dur = 0;
/*
* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
* is less than or equal to the rate of the immediately previous
* frame in the FES
*/
rspec = brcms_basic_rate(wlc, rspec);
/* ACK frame len == 14 == 2(fc) + 2(dur) + 6(ra) + 4(fcs) */
dur =
brcms_c_calc_frame_time(wlc, rspec, preamble_type,
(DOT11_ACK_LEN + FCS_LEN));
return dur;
}
static uint
brcms_c_calc_cts_time(struct brcms_c_info *wlc, u32 rspec,
u8 preamble_type)
{
return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
}
static uint
brcms_c_calc_ba_time(struct brcms_c_info *wlc, u32 rspec,
u8 preamble_type)
{
/*
* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
* is less than or equal to the rate of the immediately previous
* frame in the FES
*/
rspec = brcms_basic_rate(wlc, rspec);
/* BA len == 32 == 16(ctl hdr) + 4(ba len) + 8(bitmap) + 4(fcs) */
return brcms_c_calc_frame_time(wlc, rspec, preamble_type,
(DOT11_BA_LEN + DOT11_BA_BITMAP_LEN +
FCS_LEN));
}
/* brcms_c_compute_frame_dur()
*
* Calculate the 802.11 MAC header DUR field for MPDU
* DUR for a single frame = 1 SIFS + 1 ACK
* DUR for a frame with following frags = 3 SIFS + 2 ACK + next frag time
*
* rate MPDU rate in unit of 500kbps
* next_frag_len next MPDU length in bytes
* preamble_type use short/GF or long/MM PLCP header
*/
static u16
brcms_c_compute_frame_dur(struct brcms_c_info *wlc, u32 rate,
u8 preamble_type, uint next_frag_len)
{
u16 dur, sifs;
sifs = get_sifs(wlc->band);
dur = sifs;
dur += (u16) brcms_c_calc_ack_time(wlc, rate, preamble_type);
if (next_frag_len) {
/* Double the current DUR to get 2 SIFS + 2 ACKs */
dur *= 2;
/* add another SIFS and the frag time */
dur += sifs;
dur +=
(u16) brcms_c_calc_frame_time(wlc, rate, preamble_type,
next_frag_len);
}
return dur;
}
/* The opposite of brcms_c_calc_frame_time */
static uint
brcms_c_calc_frame_len(struct brcms_c_info *wlc, u32 ratespec,
u8 preamble_type, uint dur)
{
uint nsyms, mac_len, Ndps, kNdps;
uint rate = rspec2rate(ratespec);
if (is_mcs_rate(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
dur -= PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
/* payload calculation matches that of regular ofdm */
if (wlc->band->bandtype == BRCM_BAND_2G)
dur -= DOT11_OFDM_SIGNAL_EXTENSION;
/* kNdbps = kbps * 4 */
kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
rspec_issgi(ratespec)) * 4;
nsyms = dur / APHY_SYMBOL_TIME;
mac_len =
((nsyms * kNdps) -
((APHY_SERVICE_NBITS + APHY_TAIL_NBITS) * 1000)) / 8000;
} else if (is_ofdm_rate(ratespec)) {
dur -= APHY_PREAMBLE_TIME;
dur -= APHY_SIGNAL_TIME;
/* Ndbps = Mbps * 4 = rate(500Kbps) * 2 */
Ndps = rate * 2;
nsyms = dur / APHY_SYMBOL_TIME;
mac_len =
((nsyms * Ndps) -
(APHY_SERVICE_NBITS + APHY_TAIL_NBITS)) / 8;
} else {
if (preamble_type & BRCMS_SHORT_PREAMBLE)
dur -= BPHY_PLCP_SHORT_TIME;
else
dur -= BPHY_PLCP_TIME;
mac_len = dur * rate;
/* divide out factor of 2 in rate (1/2 mbps) */
mac_len = mac_len / 8 / 2;
}
return mac_len;
}
/*
* Return true if the specified rate is supported by the specified band.
* BRCM_BAND_AUTO indicates the current band.
*/
static bool brcms_c_valid_rate(struct brcms_c_info *wlc, u32 rspec, int band,
bool verbose)
{
struct brcms_c_rateset *hw_rateset;
uint i;
if ((band == BRCM_BAND_AUTO) || (band == wlc->band->bandtype))
hw_rateset = &wlc->band->hw_rateset;
else if (wlc->pub->_nbands > 1)
hw_rateset = &wlc->bandstate[OTHERBANDUNIT(wlc)]->hw_rateset;
else
/* other band specified and we are a single band device */
return false;
/* check if this is a mimo rate */
if (is_mcs_rate(rspec)) {
if ((rspec & RSPEC_RATE_MASK) >= MCS_TABLE_SIZE)
goto error;
return isset(hw_rateset->mcs, (rspec & RSPEC_RATE_MASK));
}
for (i = 0; i < hw_rateset->count; i++)
if (hw_rateset->rates[i] == rspec2rate(rspec))
return true;
error:
if (verbose)
brcms_err(wlc->hw->d11core, "wl%d: valid_rate: rate spec 0x%x "
"not in hw_rateset\n", wlc->pub->unit, rspec);
return false;
}
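/*
* Build an internal ratespec from an nrate-style value: the low bits hold
* the legacy rate or MCS index and the remaining bits carry STF mode, SGI
* and MCS-only override flags. On any validation failure the plain rate is
* returned unchanged.
*/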
static u32
mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
u32 int_val)
{
struct bcma_device *core = wlc->hw->d11core;
u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
u8 rate = int_val & NRATE_RATE_MASK;
u32 rspec;
bool ismcs = ((int_val & NRATE_MCS_INUSE) == NRATE_MCS_INUSE);
bool issgi = ((int_val & NRATE_SGI_MASK) >> NRATE_SGI_SHIFT);
bool override_mcs_only = ((int_val & NRATE_OVERRIDE_MCS_ONLY)
== NRATE_OVERRIDE_MCS_ONLY);
int bcmerror = 0;
if (!ismcs)
return (u32) rate;
/* validate the combination of rate/mcs/stf is allowed */
if ((wlc->pub->_n_enab & SUPPORT_11N) && ismcs) {
/* mcs only allowed when nmode */
if (stf > PHY_TXC1_MODE_SDM) {
brcms_err(core, "wl%d: %s: Invalid stf\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
/* mcs 32 is a special case, DUP mode 40 only */
if (rate == 32) {
if (!CHSPEC_IS40(wlc->home_chanspec) ||
((stf != PHY_TXC1_MODE_SISO)
&& (stf != PHY_TXC1_MODE_CDD))) {
brcms_err(core, "wl%d: %s: Invalid mcs 32\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
} else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
/* mcs > 7 must use stf SDM */
if (stf != PHY_TXC1_MODE_SDM) {
brcms_dbg_mac80211(core, "wl%d: enabling "
"SDM mode for mcs %d\n",
wlc->pub->unit, rate);
stf = PHY_TXC1_MODE_SDM;
}
} else {
/*
* MCS 0-7 may use SISO, CDD, and for
* phy_rev >= 3 STBC
*/
if ((stf > PHY_TXC1_MODE_STBC) ||
(!BRCMS_STBC_CAP_PHY(wlc)
&& (stf == PHY_TXC1_MODE_STBC))) {
brcms_err(core, "wl%d: %s: Invalid STBC\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
}
} else if (is_ofdm_rate(rate)) {
if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
brcms_err(core, "wl%d: %s: Invalid OFDM\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
} else if (is_cck_rate(rate)) {
if ((cur_band->bandtype != BRCM_BAND_2G)
|| (stf != PHY_TXC1_MODE_SISO)) {
brcms_err(core, "wl%d: %s: Invalid CCK\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
} else {
brcms_err(core, "wl%d: %s: Unknown rate type\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
/* make sure multiple antennae are available for non-siso rates */
if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
brcms_err(core, "wl%d: %s: SISO antenna but !SISO "
"request\n", wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
rspec = rate;
if (ismcs) {
rspec |= RSPEC_MIMORATE;
/* For STBC populate the STC field of the ratespec */
if (stf == PHY_TXC1_MODE_STBC) {
u8 stc;
stc = 1; /* Nss for single stream is always 1 */
rspec |= (stc << RSPEC_STC_SHIFT);
}
}
rspec |= (stf << RSPEC_STF_SHIFT);
if (override_mcs_only)
rspec |= RSPEC_OVERRIDE_MCS_ONLY;
if (issgi)
rspec |= RSPEC_SHORT_GI;
if ((rate != 0)
&& !brcms_c_valid_rate(wlc, rspec, cur_band->bandtype, true))
return rate;
return rspec;
done:
return rate;
}
/*
* Compute PLCP, but only requires actual rate and length of pkt.
* Rate is given in the driver standard multiple of 500 kbps.
* le is set for 11 Mbps rate if necessary.
* Broken out for PRQ.
*/
static void brcms_c_cck_plcp_set(struct brcms_c_info *wlc, int rate_500,
uint length, u8 *plcp)
{
u16 usec = 0;
u8 le = 0;
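/* airtime in microseconds is length * 8 / rate(Mbps); the 5.5 and 11 Mbps
* cases round up
*/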
switch (rate_500) {
case BRCM_RATE_1M:
usec = length << 3;
break;
case BRCM_RATE_2M:
usec = length << 2;
break;
case BRCM_RATE_5M5:
usec = (length << 4) / 11;
if ((length << 4) - (usec * 11) > 0)
usec++;
break;
case BRCM_RATE_11M:
usec = (length << 3) / 11;
if ((length << 3) - (usec * 11) > 0) {
usec++;
if ((usec * 11) - (length << 3) >= 8)
le = D11B_PLCP_SIGNAL_LE;
}
break;
default:
brcms_err(wlc->hw->d11core,
"brcms_c_cck_plcp_set: unsupported rate %d\n",
rate_500);
rate_500 = BRCM_RATE_1M;
usec = length << 3;
break;
}
/* PLCP signal byte */
plcp[0] = rate_500 * 5; /* r (500kbps) * 5 == r (100kbps) */
/* PLCP service byte */
plcp[1] = (u8) (le | D11B_PLCP_SIGNAL_LOCKED);
/* PLCP length u16, little endian */
plcp[2] = usec & 0xff;
plcp[3] = (usec >> 8) & 0xff;
/* PLCP CRC16 */
plcp[4] = 0;
plcp[5] = 0;
}
/* Rate: 802.11 rate code, length: PSDU length in octets */
static void brcms_c_compute_mimo_plcp(u32 rspec, uint length, u8 *plcp)
{
u8 mcs = (u8) (rspec & RSPEC_RATE_MASK);
plcp[0] = mcs;
if (rspec_is40mhz(rspec) || (mcs == 32))
plcp[0] |= MIMO_PLCP_40MHZ;
BRCMS_SET_MIMO_PLCP_LEN(plcp, length);
plcp[3] = rspec_mimoplcp3(rspec); /* rspec already holds this byte */
plcp[3] |= 0x7; /* set smoothing, not sounding ppdu & reserved */
plcp[4] = 0; /* number of extension spatial streams bit 0 & 1 */
plcp[5] = 0;
}
/* Rate: 802.11 rate code, length: PSDU length in octets */
static void
brcms_c_compute_ofdm_plcp(u32 rspec, u32 length, u8 *plcp)
{
u8 rate_signal;
u32 tmp = 0;
int rate = rspec2rate(rspec);
/*
* encode rate per 802.11a-1999 sec 17.3.4.1, with lsb
* transmitted first
*/
rate_signal = rate_info[rate] & BRCMS_RATE_MASK;
memset(plcp, 0, D11_PHY_HDR_LEN);
D11A_PHY_HDR_SRATE((struct ofdm_phy_hdr *) plcp, rate_signal);
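/* pack the 12-bit LENGTH field into bits 5..16 of the 24-bit OFDM PLCP header */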
tmp = (length & 0xfff) << 5;
plcp[2] |= (tmp >> 16) & 0xff;
plcp[1] |= (tmp >> 8) & 0xff;
plcp[0] |= tmp & 0xff;
}
/* Rate: 802.11 rate code, length: PSDU length in octets */
static void brcms_c_compute_cck_plcp(struct brcms_c_info *wlc, u32 rspec,
uint length, u8 *plcp)
{
int rate = rspec2rate(rspec);
brcms_c_cck_plcp_set(wlc, rate, length, plcp);
}
static void
brcms_c_compute_plcp(struct brcms_c_info *wlc, u32 rspec,
uint length, u8 *plcp)
{
if (is_mcs_rate(rspec))
brcms_c_compute_mimo_plcp(rspec, length, plcp);
else if (is_ofdm_rate(rspec))
brcms_c_compute_ofdm_plcp(rspec, length, plcp);
else
brcms_c_compute_cck_plcp(wlc, rspec, length, plcp);
}
/* brcms_c_compute_rtscts_dur()
*
* Calculate the 802.11 MAC header DUR field for an RTS or CTS frame
* DUR for normal RTS/CTS w/ frame = 3 SIFS + 1 CTS + next frame time + 1 ACK
* DUR for CTS-TO-SELF w/ frame = 2 SIFS + next frame time + 1 ACK
*
* cts cts-to-self or rts/cts
* rts_rate rts or cts rate in unit of 500kbps
* rate next MPDU rate in unit of 500kbps
* frame_len next MPDU frame length in bytes
*/
u16
brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
u32 rts_rate,
u32 frame_rate, u8 rts_preamble_type,
u8 frame_preamble_type, uint frame_len, bool ba)
{
u16 dur, sifs;
sifs = get_sifs(wlc->band);
if (!cts_only) {
/* RTS/CTS */
dur = 3 * sifs;
dur +=
(u16) brcms_c_calc_cts_time(wlc, rts_rate,
rts_preamble_type);
} else {
/* CTS-TO-SELF */
dur = 2 * sifs;
}
dur +=
(u16) brcms_c_calc_frame_time(wlc, frame_rate, frame_preamble_type,
frame_len);
if (ba)
dur +=
(u16) brcms_c_calc_ba_time(wlc, frame_rate,
BRCMS_SHORT_PREAMBLE);
else
dur +=
(u16) brcms_c_calc_ack_time(wlc, frame_rate,
frame_preamble_type);
return dur;
}
static u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, u32 rspec)
{
u16 phyctl1 = 0;
u16 bw;
if (BRCMS_ISLCNPHY(wlc->band)) {
bw = PHY_TXC1_BW_20MHZ;
} else {
bw = rspec_get_bw(rspec);
/* 10 MHz is not supported yet */
if (bw < PHY_TXC1_BW_20MHZ) {
brcms_err(wlc->hw->d11core, "phytxctl1_calc: bw %d is "
"not supported yet, set to 20L\n", bw);
bw = PHY_TXC1_BW_20MHZ;
}
}
if (is_mcs_rate(rspec)) {
uint mcs = rspec & RSPEC_RATE_MASK;
/* bw, stf, coding-type is part of rspec_phytxbyte2 returns */
phyctl1 = rspec_phytxbyte2(rspec);
/* set the upper byte of phyctl1 */
phyctl1 |= (mcs_table[mcs].tx_phy_ctl3 << 8);
} else if (is_cck_rate(rspec) && !BRCMS_ISLCNPHY(wlc->band)
&& !BRCMS_ISSSLPNPHY(wlc->band)) {
/*
* In CCK mode LPPHY overloads OFDM Modulation bits with CCK
* Data Rate. Eventually MIMOPHY would also be converted to
* this format
*/
/* 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps */
phyctl1 = (bw | (rspec_stf(rspec) << PHY_TXC1_MODE_SHIFT));
} else { /* legacy OFDM/CCK */
s16 phycfg;
/* get the phyctl byte from rate phycfg table */
phycfg = brcms_c_rate_legacy_phyctl(rspec2rate(rspec));
if (phycfg == -1) {
brcms_err(wlc->hw->d11core, "phytxctl1_calc: wrong "
"legacy OFDM/CCK rate\n");
phycfg = 0;
}
/* set the upper byte of phyctl1 */
phyctl1 =
(bw | (phycfg << 8) |
(rspec_stf(rspec) << PHY_TXC1_MODE_SHIFT));
}
return phyctl1;
}
/*
* Add struct d11txh, struct cck_phy_hdr.
*
* 'p' data must start with 802.11 MAC header
* 'p' must allow enough bytes of local headers to be "pushed" onto the packet
*
* headroom == D11_PHY_HDR_LEN + D11_TXH_LEN (D11_TXH_LEN is now 104 bytes)
*
*/
static u16
brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
struct sk_buff *p, struct scb *scb, uint frag,
uint nfrags, uint queue, uint next_frag_len)
{
struct ieee80211_hdr *h;
struct d11txh *txh;
u8 *plcp, plcp_fallback[D11_PHY_HDR_LEN];
int len, phylen, rts_phylen;
u16 mch, phyctl, xfts, mainrates;
u16 seq = 0, mcl = 0, status = 0, frameid = 0;
u32 rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M };
u32 rts_rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M };
bool use_rts = false;
bool use_cts = false;
bool use_rifs = false;
bool short_preamble[2] = { false, false };
u8 preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
u8 rts_preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
u8 *rts_plcp, rts_plcp_fallback[D11_PHY_HDR_LEN];
struct ieee80211_rts *rts = NULL;
bool qos;
uint ac;
bool hwtkmic = false;
u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
#define ANTCFG_NONE 0xFF
u8 antcfg = ANTCFG_NONE;
u8 fbantcfg = ANTCFG_NONE;
uint phyctl1_stf = 0;
u16 durid = 0;
struct ieee80211_tx_rate *txrate[2];
int k;
struct ieee80211_tx_info *tx_info;
bool is_mcs;
u16 mimo_txbw;
u8 mimo_preamble_type;
/* locate 802.11 MAC header */
h = (struct ieee80211_hdr *)(p->data);
qos = ieee80211_is_data_qos(h->frame_control);
/* compute length of frame in bytes for use in PLCP computations */
len = p->len;
phylen = len + FCS_LEN;
/* Get tx_info */
tx_info = IEEE80211_SKB_CB(p);
/* add PLCP */
plcp = skb_push(p, D11_PHY_HDR_LEN);
/* add Broadcom tx descriptor header */
txh = (struct d11txh *) skb_push(p, D11_TXH_LEN);
memset(txh, 0, D11_TXH_LEN);
/* setup frameid */
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
/* non-AP STA should never use BCMC queue */
if (queue == TX_BCMC_FIFO) {
brcms_err(wlc->hw->d11core,
"wl%d: %s: ASSERT queue == TX_BCMC!\n",
wlc->pub->unit, __func__);
frameid = bcmc_fid_generate(wlc, NULL, txh);
} else {
/* Increment the counter for first fragment */
if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
scb->seqnum[p->priority]++;
/* extract fragment number from frame first */
seq = le16_to_cpu(h->seq_ctrl) & FRAGNUM_MASK;
seq |= (scb->seqnum[p->priority] << SEQNUM_SHIFT);
h->seq_ctrl = cpu_to_le16(seq);
frameid = ((seq << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
(queue & TXFID_QUEUE_MASK);
}
}
frameid |= queue & TXFID_QUEUE_MASK;
/* set the ignpmq bit for all pkts tx'd in PS mode and for beacons */
if (ieee80211_is_beacon(h->frame_control))
mcl |= TXC_IGNOREPMQ;
txrate[0] = tx_info->control.rates;
txrate[1] = txrate[0] + 1;
/*
* if rate control algorithm didn't give us a fallback
* rate, use the primary rate
*/
if (txrate[1]->idx < 0)
txrate[1] = txrate[0];
for (k = 0; k < hw->max_rates; k++) {
is_mcs = txrate[k]->flags & IEEE80211_TX_RC_MCS ? true : false;
if (!is_mcs) {
if ((txrate[k]->idx >= 0)
&& (txrate[k]->idx <
hw->wiphy->bands[tx_info->band]->n_bitrates)) {
rspec[k] =
hw->wiphy->bands[tx_info->band]->
bitrates[txrate[k]->idx].hw_value;
short_preamble[k] =
txrate[k]->
flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ?
true : false;
} else {
rspec[k] = BRCM_RATE_1M;
}
} else {
rspec[k] = mac80211_wlc_set_nrate(wlc, wlc->band,
NRATE_MCS_INUSE | txrate[k]->idx);
}
/*
* Currently only support the same setting for primary and
* fallback rates. Unify flags for each rate into a
* single value for the frame
*/
use_rts |=
txrate[k]->
flags & IEEE80211_TX_RC_USE_RTS_CTS ? true : false;
use_cts |=
txrate[k]->
flags & IEEE80211_TX_RC_USE_CTS_PROTECT ? true : false;
/*
* (1) RATE:
* determine and validate primary rate
* and fallback rates
*/
if (!rspec_active(rspec[k])) {
rspec[k] = BRCM_RATE_1M;
} else {
if (!is_multicast_ether_addr(h->addr1)) {
/* set tx antenna config */
brcms_c_antsel_antcfg_get(wlc->asi, false,
false, 0, 0, &antcfg, &fbantcfg);
}
}
}
phyctl1_stf = wlc->stf->ss_opmode;
if (wlc->pub->_n_enab & SUPPORT_11N) {
for (k = 0; k < hw->max_rates; k++) {
/*
* apply siso/cdd to single stream mcs's or ofdm
* if rspec is auto selected
*/
if (((is_mcs_rate(rspec[k]) &&
is_single_stream(rspec[k] & RSPEC_RATE_MASK)) ||
is_ofdm_rate(rspec[k]))
&& ((rspec[k] & RSPEC_OVERRIDE_MCS_ONLY)
|| !(rspec[k] & RSPEC_OVERRIDE))) {
rspec[k] &= ~(RSPEC_STF_MASK | RSPEC_STC_MASK);
/* For SISO MCS use STBC if possible */
if (is_mcs_rate(rspec[k])
&& BRCMS_STF_SS_STBC_TX(wlc, scb)) {
u8 stc;
/* Nss for single stream is always 1 */
stc = 1;
rspec[k] |= (PHY_TXC1_MODE_STBC <<
RSPEC_STF_SHIFT) |
(stc << RSPEC_STC_SHIFT);
} else
rspec[k] |=
(phyctl1_stf << RSPEC_STF_SHIFT);
}
/*
* Is the phy configured to use 40MHZ frames? If
* so then pick the desired txbw
*/
if (brcms_chspec_bw(wlc->chanspec) == BRCMS_40_MHZ) {
/* default txbw is 20in40 SB */
mimo_ctlchbw = mimo_txbw =
CHSPEC_SB_UPPER(wlc_phy_chanspec_get(
wlc->band->pi))
? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
if (is_mcs_rate(rspec[k])) {
/* mcs 32 must be 40b/w DUP */
if ((rspec[k] & RSPEC_RATE_MASK)
== 32) {
mimo_txbw =
PHY_TXC1_BW_40MHZ_DUP;
/* use override */
} else if (wlc->mimo_40txbw != AUTO)
mimo_txbw = wlc->mimo_40txbw;
/* else check if dst is using 40 MHz */
else if (scb->flags & SCB_IS40)
mimo_txbw = PHY_TXC1_BW_40MHZ;
} else if (is_ofdm_rate(rspec[k])) {
if (wlc->ofdm_40txbw != AUTO)
mimo_txbw = wlc->ofdm_40txbw;
} else if (wlc->cck_40txbw != AUTO) {
mimo_txbw = wlc->cck_40txbw;
}
} else {
/*
* mcs32 is 40 b/w only.
* This is possible for probe packets on
* a STA during SCAN
*/
if ((rspec[k] & RSPEC_RATE_MASK) == 32)
/* mcs 0 */
rspec[k] = RSPEC_MIMORATE;
mimo_txbw = PHY_TXC1_BW_20MHZ;
}
/* Set channel width */
rspec[k] &= ~RSPEC_BW_MASK;
if ((k == 0) || ((k > 0) && is_mcs_rate(rspec[k])))
rspec[k] |= (mimo_txbw << RSPEC_BW_SHIFT);
else
rspec[k] |= (mimo_ctlchbw << RSPEC_BW_SHIFT);
/* Disable short GI, not supported yet */
rspec[k] &= ~RSPEC_SHORT_GI;
mimo_preamble_type = BRCMS_MM_PREAMBLE;
if (txrate[k]->flags & IEEE80211_TX_RC_GREEN_FIELD)
mimo_preamble_type = BRCMS_GF_PREAMBLE;
if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
&& (!is_mcs_rate(rspec[k]))) {
brcms_warn(wlc->hw->d11core,
"wl%d: %s: IEEE80211_TX_RC_MCS != is_mcs_rate(rspec)\n",
wlc->pub->unit, __func__);
}
if (is_mcs_rate(rspec[k])) {
preamble_type[k] = mimo_preamble_type;
/*
* if SGI is selected, then force mm
* for single stream
*/
if ((rspec[k] & RSPEC_SHORT_GI)
&& is_single_stream(rspec[k] &
RSPEC_RATE_MASK))
preamble_type[k] = BRCMS_MM_PREAMBLE;
}
/* should be better conditionalized */
if (!is_mcs_rate(rspec[0])
&& (tx_info->control.rates[0].
flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
preamble_type[k] = BRCMS_SHORT_PREAMBLE;
}
} else {
for (k = 0; k < hw->max_rates; k++) {
/* Set ctrlchbw as 20Mhz */
rspec[k] &= ~RSPEC_BW_MASK;
rspec[k] |= (PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT);
/* for nphy, stf of ofdm frames must follow policies */
if (BRCMS_ISNPHY(wlc->band) && is_ofdm_rate(rspec[k])) {
rspec[k] &= ~RSPEC_STF_MASK;
rspec[k] |= phyctl1_stf << RSPEC_STF_SHIFT;
}
}
}
/* Reset these for use with AMPDU's */
txrate[0]->count = 0;
txrate[1]->count = 0;
/* (2) PROTECTION, may change rspec */
if ((ieee80211_is_data(h->frame_control) ||
ieee80211_is_mgmt(h->frame_control)) &&
(phylen > wlc->RTSThresh) && !is_multicast_ether_addr(h->addr1))
use_rts = true;
/* (3) PLCP: determine PLCP header and MAC duration,
* fill struct d11txh */
brcms_c_compute_plcp(wlc, rspec[0], phylen, plcp);
brcms_c_compute_plcp(wlc, rspec[1], phylen, plcp_fallback);
memcpy(&txh->FragPLCPFallback,
plcp_fallback, sizeof(txh->FragPLCPFallback));
/* Length field now put in CCK FBR CRC field */
if (is_cck_rate(rspec[1])) {
txh->FragPLCPFallback[4] = phylen & 0xff;
txh->FragPLCPFallback[5] = (phylen & 0xff00) >> 8;
}
/* MIMO-RATE: need validation ?? */
mainrates = is_ofdm_rate(rspec[0]) ?
D11A_PHY_HDR_GRATE((struct ofdm_phy_hdr *) plcp) :
plcp[0];
/* DUR field for main rate */
if (!ieee80211_is_pspoll(h->frame_control) &&
!is_multicast_ether_addr(h->addr1) && !use_rifs) {
durid =
brcms_c_compute_frame_dur(wlc, rspec[0], preamble_type[0],
next_frag_len);
h->duration_id = cpu_to_le16(durid);
} else if (use_rifs) {
/* NAV protect to end of next max packet size */
durid =
(u16) brcms_c_calc_frame_time(wlc, rspec[0],
preamble_type[0],
DOT11_MAX_FRAG_LEN);
durid += RIFS_11N_TIME;
h->duration_id = cpu_to_le16(durid);
}
/* DUR field for fallback rate */
if (ieee80211_is_pspoll(h->frame_control))
txh->FragDurFallback = h->duration_id;
else if (is_multicast_ether_addr(h->addr1) || use_rifs)
txh->FragDurFallback = 0;
else {
durid = brcms_c_compute_frame_dur(wlc, rspec[1],
preamble_type[1], next_frag_len);
txh->FragDurFallback = cpu_to_le16(durid);
}
/* (4) MAC-HDR: MacTxControlLow */
if (frag == 0)
mcl |= TXC_STARTMSDU;
if (!is_multicast_ether_addr(h->addr1))
mcl |= TXC_IMMEDACK;
if (wlc->band->bandtype == BRCM_BAND_5G)
mcl |= TXC_FREQBAND_5G;
if (CHSPEC_IS40(wlc_phy_chanspec_get(wlc->band->pi)))
mcl |= TXC_BW_40;
/* set AMIC bit if using hardware TKIP MIC */
if (hwtkmic)
mcl |= TXC_AMIC;
txh->MacTxControlLow = cpu_to_le16(mcl);
/* MacTxControlHigh */
mch = 0;
/* Set fallback rate preamble type */
if ((preamble_type[1] == BRCMS_SHORT_PREAMBLE) ||
(preamble_type[1] == BRCMS_GF_PREAMBLE)) {
if (rspec2rate(rspec[1]) != BRCM_RATE_1M)
mch |= TXC_PREAMBLE_DATA_FB_SHORT;
}
/* MacFrameControl */
memcpy(&txh->MacFrameControl, &h->frame_control, sizeof(u16));
txh->TxFesTimeNormal = cpu_to_le16(0);
txh->TxFesTimeFallback = cpu_to_le16(0);
/* TxFrameRA */
memcpy(&txh->TxFrameRA, &h->addr1, ETH_ALEN);
/* TxFrameID */
txh->TxFrameID = cpu_to_le16(frameid);
/*
* TxStatus. Note: when recreating the first frag of a suppressed
* frame, we may need to reset the retry counts via the status reg
*/
txh->TxStatus = cpu_to_le16(status);
/*
* extra fields for ucode AMPDU aggregation; the new fields are added to
* the END of the previous structure so that the driver stays compatible.
*/
txh->MaxNMpdus = cpu_to_le16(0);
txh->MaxABytes_MRT = cpu_to_le16(0);
txh->MaxABytes_FBR = cpu_to_le16(0);
txh->MinMBytes = cpu_to_le16(0);
/* (5) RTS/CTS: determine RTS/CTS PLCP header and MAC duration,
* furnish struct d11txh */
/* RTS PLCP header and RTS frame */
if (use_rts || use_cts) {
if (use_rts && use_cts)
use_cts = false;
for (k = 0; k < 2; k++) {
rts_rspec[k] = brcms_c_rspec_to_rts_rspec(wlc, rspec[k],
false,
mimo_ctlchbw);
}
if (!is_ofdm_rate(rts_rspec[0]) &&
!((rspec2rate(rts_rspec[0]) == BRCM_RATE_1M) ||
(wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
rts_preamble_type[0] = BRCMS_SHORT_PREAMBLE;
mch |= TXC_PREAMBLE_RTS_MAIN_SHORT;
}
if (!is_ofdm_rate(rts_rspec[1]) &&
!((rspec2rate(rts_rspec[1]) == BRCM_RATE_1M) ||
(wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
rts_preamble_type[1] = BRCMS_SHORT_PREAMBLE;
mch |= TXC_PREAMBLE_RTS_FB_SHORT;
}
/* RTS/CTS additions to MacTxControlLow */
if (use_cts) {
txh->MacTxControlLow |= cpu_to_le16(TXC_SENDCTS);
} else {
txh->MacTxControlLow |= cpu_to_le16(TXC_SENDRTS);
txh->MacTxControlLow |= cpu_to_le16(TXC_LONGFRAME);
}
/* RTS PLCP header */
rts_plcp = txh->RTSPhyHeader;
if (use_cts)
rts_phylen = DOT11_CTS_LEN + FCS_LEN;
else
rts_phylen = DOT11_RTS_LEN + FCS_LEN;
brcms_c_compute_plcp(wlc, rts_rspec[0], rts_phylen, rts_plcp);
/* fallback rate version of RTS PLCP header */
brcms_c_compute_plcp(wlc, rts_rspec[1], rts_phylen,
rts_plcp_fallback);
memcpy(&txh->RTSPLCPFallback, rts_plcp_fallback,
sizeof(txh->RTSPLCPFallback));
/* RTS frame fields... */
rts = (struct ieee80211_rts *)&txh->rts_frame;
durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec[0],
rspec[0], rts_preamble_type[0],
preamble_type[0], phylen, false);
rts->duration = cpu_to_le16(durid);
/* fallback rate version of RTS DUR field */
durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
rts_rspec[1], rspec[1],
rts_preamble_type[1],
preamble_type[1], phylen, false);
txh->RTSDurFallback = cpu_to_le16(durid);
if (use_cts) {
rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_CTS);
memcpy(&rts->ra, &h->addr2, ETH_ALEN);
} else {
rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
memcpy(&rts->ra, &h->addr1, 2 * ETH_ALEN);
}
/* mainrate
* low 8 bits: main frag rate/mcs,
* high 8 bits: rts/cts rate/mcs
*/
mainrates |= (is_ofdm_rate(rts_rspec[0]) ?
D11A_PHY_HDR_GRATE(
(struct ofdm_phy_hdr *) rts_plcp) :
rts_plcp[0]) << 8;
} else {
memset(txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
memset(&txh->rts_frame, 0, sizeof(struct ieee80211_rts));
memset(txh->RTSPLCPFallback, 0, sizeof(txh->RTSPLCPFallback));
txh->RTSDurFallback = 0;
}
#ifdef SUPPORT_40MHZ
/* add null delimiter count */
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && is_mcs_rate(rspec[0]))
txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] =
brcm_c_ampdu_null_delim_cnt(wlc->ampdu, scb, rspec[0], phylen);
#endif
/*
* Now that RTS/RTS FB preamble types are updated, write
* the final value
*/
txh->MacTxControlHigh = cpu_to_le16(mch);
/*
* MainRates (both the rts and frag plcp rates have
* been calculated now)
*/
txh->MainRates = cpu_to_le16(mainrates);
/* XtraFrameTypes */
xfts = frametype(rspec[1], wlc->mimoft);
xfts |= (frametype(rts_rspec[0], wlc->mimoft) << XFTS_RTS_FT_SHIFT);
xfts |= (frametype(rts_rspec[1], wlc->mimoft) << XFTS_FBRRTS_FT_SHIFT);
xfts |= CHSPEC_CHANNEL(wlc_phy_chanspec_get(wlc->band->pi)) <<
XFTS_CHANNEL_SHIFT;
txh->XtraFrameTypes = cpu_to_le16(xfts);
/* PhyTxControlWord */
phyctl = frametype(rspec[0], wlc->mimoft);
if ((preamble_type[0] == BRCMS_SHORT_PREAMBLE) ||
(preamble_type[0] == BRCMS_GF_PREAMBLE)) {
if (rspec2rate(rspec[0]) != BRCM_RATE_1M)
phyctl |= PHY_TXC_SHORT_HDR;
}
/* phytxant is properly bit shifted */
phyctl |= brcms_c_stf_d11hdrs_phyctl_txant(wlc, rspec[0]);
txh->PhyTxControlWord = cpu_to_le16(phyctl);
/* PhyTxControlWord_1 */
if (BRCMS_PHY_11N_CAP(wlc->band)) {
u16 phyctl1 = 0;
phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[0]);
txh->PhyTxControlWord_1 = cpu_to_le16(phyctl1);
phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[1]);
txh->PhyTxControlWord_1_Fbr = cpu_to_le16(phyctl1);
if (use_rts || use_cts) {
phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[0]);
txh->PhyTxControlWord_1_Rts = cpu_to_le16(phyctl1);
phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[1]);
txh->PhyTxControlWord_1_FbrRts = cpu_to_le16(phyctl1);
}
/*
* For mcs frames, if mixed mode (overloaded with long preamble)
* is going to be set, fill in non-zero MModeLen and/or
* MModeFbrLen; this will be unnecessary if they are separated
*/
if (is_mcs_rate(rspec[0]) &&
(preamble_type[0] == BRCMS_MM_PREAMBLE)) {
u16 mmodelen =
brcms_c_calc_lsig_len(wlc, rspec[0], phylen);
txh->MModeLen = cpu_to_le16(mmodelen);
}
if (is_mcs_rate(rspec[1]) &&
(preamble_type[1] == BRCMS_MM_PREAMBLE)) {
u16 mmodefbrlen =
brcms_c_calc_lsig_len(wlc, rspec[1], phylen);
txh->MModeFbrLen = cpu_to_le16(mmodefbrlen);
}
}
ac = skb_get_queue_mapping(p);
if ((scb->flags & SCB_WMECAP) && qos && wlc->edcf_txop[ac]) {
uint frag_dur, dur, dur_fallback;
/* WME: Update TXOP threshold */
if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) && frag == 0) {
frag_dur =
brcms_c_calc_frame_time(wlc, rspec[0],
preamble_type[0], phylen);
if (rts) {
/* 1 RTS or CTS-to-self frame */
dur =
brcms_c_calc_cts_time(wlc, rts_rspec[0],
rts_preamble_type[0]);
dur_fallback =
brcms_c_calc_cts_time(wlc, rts_rspec[1],
rts_preamble_type[1]);
/* (SIFS + CTS) + SIFS + frame + SIFS + ACK */
dur += le16_to_cpu(rts->duration);
dur_fallback +=
le16_to_cpu(txh->RTSDurFallback);
} else if (use_rifs) {
dur = frag_dur;
dur_fallback = 0;
} else {
/* frame + SIFS + ACK */
dur = frag_dur;
dur +=
brcms_c_compute_frame_dur(wlc, rspec[0],
preamble_type[0], 0);
dur_fallback =
brcms_c_calc_frame_time(wlc, rspec[1],
preamble_type[1],
phylen);
dur_fallback +=
brcms_c_compute_frame_dur(wlc, rspec[1],
preamble_type[1], 0);
}
/* NEED to set TxFesTimeNormal (hard) */
txh->TxFesTimeNormal = cpu_to_le16((u16) dur);
/*
* NEED to set fallback rate version of
* TxFesTimeNormal (hard)
*/
txh->TxFesTimeFallback =
cpu_to_le16((u16) dur_fallback);
/*
* update txop byte threshold (txop minus intraframe
* overhead)
*/
if (wlc->edcf_txop[ac] >= (dur - frag_dur)) {
uint newfragthresh;
newfragthresh =
brcms_c_calc_frame_len(wlc,
rspec[0], preamble_type[0],
(wlc->edcf_txop[ac] -
(dur - frag_dur)));
/* range bound the fragthreshold */
if (newfragthresh < DOT11_MIN_FRAG_LEN)
newfragthresh =
DOT11_MIN_FRAG_LEN;
else if (newfragthresh >
wlc->usr_fragthresh)
newfragthresh =
wlc->usr_fragthresh;
/* update the fragthresh and do txc update */
if (wlc->fragthresh[queue] !=
(u16) newfragthresh)
wlc->fragthresh[queue] =
(u16) newfragthresh;
} else {
brcms_warn(wlc->hw->d11core,
"wl%d: %s txop invalid for rate %d\n",
wlc->pub->unit, fifo_names[queue],
rspec2rate(rspec[0]));
}
if (dur > wlc->edcf_txop[ac])
brcms_warn(wlc->hw->d11core,
"wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n",
wlc->pub->unit, __func__,
fifo_names[queue],
phylen, wlc->fragthresh[queue],
dur, wlc->edcf_txop[ac]);
}
}
return 0;
}
static int brcms_c_tx(struct brcms_c_info *wlc, struct sk_buff *skb)
{
struct dma_pub *dma;
int fifo, ret = -ENOSPC;
struct d11txh *txh;
u16 frameid = INVALIDFID;
fifo = brcms_ac_to_fifo(skb_get_queue_mapping(skb));
dma = wlc->hw->di[fifo];
txh = (struct d11txh *)(skb->data);
if (dma->txavail == 0) {
/*
* We sometimes get a frame from mac80211 after stopping
* the queues. This only ever seems to be a single frame
* and it seems likely to be a race. TX_HEADROOM should
* ensure that we have enough space to handle these stray
* packets, so warn if there isn't. If we're out of space
* in the tx ring and the tx queue isn't stopped then
* we've really got a bug; warn loudly if that happens.
*/
brcms_warn(wlc->hw->d11core,
"Received frame for tx with no space in DMA ring\n");
WARN_ON(!ieee80211_queue_stopped(wlc->pub->ieee_hw,
skb_get_queue_mapping(skb)));
return -ENOSPC;
}
/* When a BC/MC frame is being committed to the BCMC fifo
* via DMA (NOT PIO), update ucode or BSS info as appropriate.
*/
if (fifo == TX_BCMC_FIFO)
frameid = le16_to_cpu(txh->TxFrameID);
/* Commit BCMC sequence number in the SHM frame ID location */
if (frameid != INVALIDFID) {
/*
* To inform the ucode of the last mcast frame posted
* so that it can clear moredata bit
*/
brcms_b_write_shm(wlc->hw, M_BCMC_FID, frameid);
}
ret = brcms_c_txfifo(wlc, fifo, skb);
/*
* The only reason for brcms_c_txfifo to fail is because
* there weren't any DMA descriptors, but we've already
* checked for that. So if it does fail yell loudly.
*/
WARN_ON_ONCE(ret);
return ret;
}
bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
struct ieee80211_hw *hw)
{
uint fifo;
struct scb *scb = &wlc->pri_scb;
fifo = brcms_ac_to_fifo(skb_get_queue_mapping(sdu));
brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0);
if (!brcms_c_tx(wlc, sdu))
return true;
/* packet discarded */
dev_kfree_skb_any(sdu);
return false;
}
int
brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p)
{
struct dma_pub *dma = wlc->hw->di[fifo];
int ret;
u16 queue;
ret = dma_txfast(wlc, dma, p);
if (ret < 0)
wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
/*
* Stop queue if DMA ring is full. Reserve some free descriptors,
* as we sometimes receive a frame from mac80211 after the queues
* are stopped.
*/
queue = skb_get_queue_mapping(p);
if (dma->txavail <= TX_HEADROOM && fifo < TX_BCMC_FIFO &&
!ieee80211_queue_stopped(wlc->pub->ieee_hw, queue))
ieee80211_stop_queue(wlc->pub->ieee_hw, queue);
return ret;
}
u32
brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
bool use_rspec, u16 mimo_ctlchbw)
{
u32 rts_rspec = 0;
if (use_rspec)
/* use frame rate as rts rate */
rts_rspec = rspec;
else if (wlc->band->gmode && wlc->protection->_g && !is_cck_rate(rspec))
/* Use 11Mbps as the g protection RTS target rate and fallback.
* Use the brcms_basic_rate() lookup to find the best basic rate
* under the target in case 11 Mbps is not Basic.
* 6 and 9 Mbps are not usually selected by rate selection, but
* even if the OFDM rate we are protecting is 6 or 9 Mbps, 11
* is more robust.
*/
rts_rspec = brcms_basic_rate(wlc, BRCM_RATE_11M);
else
/* calculate RTS rate and fallback rate based on the frame rate
* RTS must be sent at a basic rate since it is a
* control frame, sec 9.6 of 802.11 spec
*/
rts_rspec = brcms_basic_rate(wlc, rspec);
if (BRCMS_PHY_11N_CAP(wlc->band)) {
/* set rts txbw to correct side band */
rts_rspec &= ~RSPEC_BW_MASK;
/*
* if rspec/rspec_fallback is 40MHz, then send RTS on both
* 20MHz channel (DUP), otherwise send RTS on control channel
*/
if (rspec_is40mhz(rspec) && !is_cck_rate(rts_rspec))
rts_rspec |= (PHY_TXC1_BW_40MHZ_DUP << RSPEC_BW_SHIFT);
else
rts_rspec |= (mimo_ctlchbw << RSPEC_BW_SHIFT);
/* pick siso/cdd as default for ofdm */
if (is_ofdm_rate(rts_rspec)) {
rts_rspec &= ~RSPEC_STF_MASK;
rts_rspec |= (wlc->stf->ss_opmode << RSPEC_STF_SHIFT);
}
}
return rts_rspec;
}
/* Update beacon listen interval in shared memory */
static void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
{
/* wake up every DTIM is the default */
if (wlc->bcn_li_dtim == 1)
brcms_b_write_shm(wlc->hw, M_BCN_LI, 0);
else
brcms_b_write_shm(wlc->hw, M_BCN_LI,
(wlc->bcn_li_dtim << 8) | wlc->bcn_li_bcn);
}
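/*
* Illustrative note (not from the original driver): the SHM value written
* above packs the DTIM-based listen interval into the high byte and the
* beacon-based interval into the low byte. For example, bcn_li_dtim == 3
* and bcn_li_bcn == 10 would be written as (3 << 8) | 10 == 0x030a.
*/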
static void
brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr)
{
struct bcma_device *core = wlc_hw->d11core;
/* read the tsf timer low, then high to get an atomic read */
*tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
*tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
}
/*
* recover 64bit TSF value from the 16bit TSF value in the rx header
* given the assumption that the TSF passed in header is within 65ms
* of the current tsf.
*
* 6       5       4       4       3       2       1
* 3.......6.......8.......0.......2.......4.......6.......8......0
* |<---------- tsf_h ----------->||<--- tsf_l -->||<-RxTSFTime ->|
*
* RxTSFTime holds the lowest 16 bits and is provided by the ucode. The
* tsf_l is filled in by brcms_b_recv, which is done earlier in the
* receive call sequence after rx interrupt. Only the higher 16 bits
* are used. Finally, the tsf_h is read from the tsf register.
*/
static u64 brcms_c_recover_tsf64(struct brcms_c_info *wlc,
struct d11rxhdr *rxh)
{
u32 tsf_h, tsf_l;
u16 rx_tsf_0_15, rx_tsf_16_31;
brcms_b_read_tsf(wlc->hw, &tsf_l, &tsf_h);
rx_tsf_16_31 = (u16)(tsf_l >> 16);
rx_tsf_0_15 = rxh->RxTSFTime;
/*
* a greater tsf time indicates the low 16 bits of
* tsf_l wrapped, so decrement the high 16 bits.
*/
if ((u16)tsf_l < rx_tsf_0_15) {
rx_tsf_16_31 -= 1;
if (rx_tsf_16_31 == 0xffff)
tsf_h -= 1;
}
return ((u64)tsf_h << 32) | (((u32)rx_tsf_16_31 << 16) + rx_tsf_0_15);
}
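/*
* Illustrative note (not from the original driver): a worked example of
* the wrap correction above, using hypothetical register values. Suppose
* the TSF read returns tsf_l == 0x1234000a while the ucode reported
* RxTSFTime == 0xfff0 for the frame. Since (u16)tsf_l (0x000a) is less
* than 0xfff0, the low 16 bits wrapped between reception and the read,
* so rx_tsf_16_31 is decremented from 0x1234 to 0x1233 and the recovered
* 64-bit TSF ends in 0x1233fff0, slightly before the current timer value
* as expected.
*/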
static void
prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
struct sk_buff *p,
struct ieee80211_rx_status *rx_status)
{
int preamble;
int channel;
u32 rspec;
unsigned char *plcp;
/* fill in TSF and flag its presence */
rx_status->mactime = brcms_c_recover_tsf64(wlc, rxh);
rx_status->flag |= RX_FLAG_MACTIME_START;
channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
rx_status->band =
channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
rx_status->freq =
ieee80211_channel_to_frequency(channel, rx_status->band);
rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
/* noise */
/* qual */
rx_status->antenna =
(rxh->PhyRxStatus_0 & PRXS0_RXANT_UPSUBBAND) ? 1 : 0;
plcp = p->data;
rspec = brcms_c_compute_rspec(rxh, plcp);
if (is_mcs_rate(rspec)) {
rx_status->rate_idx = rspec & RSPEC_RATE_MASK;
rx_status->flag |= RX_FLAG_HT;
if (rspec_is40mhz(rspec))
rx_status->flag |= RX_FLAG_40MHZ;
} else {
switch (rspec2rate(rspec)) {
case BRCM_RATE_1M:
rx_status->rate_idx = 0;
break;
case BRCM_RATE_2M:
rx_status->rate_idx = 1;
break;
case BRCM_RATE_5M5:
rx_status->rate_idx = 2;
break;
case BRCM_RATE_11M:
rx_status->rate_idx = 3;
break;
case BRCM_RATE_6M:
rx_status->rate_idx = 4;
break;
case BRCM_RATE_9M:
rx_status->rate_idx = 5;
break;
case BRCM_RATE_12M:
rx_status->rate_idx = 6;
break;
case BRCM_RATE_18M:
rx_status->rate_idx = 7;
break;
case BRCM_RATE_24M:
rx_status->rate_idx = 8;
break;
case BRCM_RATE_36M:
rx_status->rate_idx = 9;
break;
case BRCM_RATE_48M:
rx_status->rate_idx = 10;
break;
case BRCM_RATE_54M:
rx_status->rate_idx = 11;
break;
default:
brcms_err(wlc->hw->d11core,
"%s: Unknown rate\n", __func__);
}
/*
* For 5GHz, we should decrease the index as it is
* a subset of the 2.4G rates. See bitrates field
* of brcms_band_5GHz_nphy (in mac80211_if.c).
*/
if (rx_status->band == IEEE80211_BAND_5GHZ)
rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
/* Determine short preamble and rate_idx */
preamble = 0;
if (is_cck_rate(rspec)) {
if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
rx_status->flag |= RX_FLAG_SHORTPRE;
} else if (is_ofdm_rate(rspec)) {
rx_status->flag |= RX_FLAG_SHORTPRE;
} else {
brcms_err(wlc->hw->d11core, "%s: Unknown modulation\n",
__func__);
}
}
if (plcp3_issgi(plcp[3]))
rx_status->flag |= RX_FLAG_SHORT_GI;
if (rxh->RxStatus1 & RXS_DECERR) {
rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
brcms_err(wlc->hw->d11core, "%s: RX_FLAG_FAILED_PLCP_CRC\n",
__func__);
}
if (rxh->RxStatus1 & RXS_FCSERR) {
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
brcms_err(wlc->hw->d11core, "%s: RX_FLAG_FAILED_FCS_CRC\n",
__func__);
}
}
static void
brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
struct sk_buff *p)
{
int len_mpdu;
struct ieee80211_rx_status rx_status;
struct ieee80211_hdr *hdr;
memset(&rx_status, 0, sizeof(rx_status));
prep_mac80211_status(wlc, rxh, p, &rx_status);
/* mac header+body length, exclude CRC and plcp header */
len_mpdu = p->len - D11_PHY_HDR_LEN - FCS_LEN;
skb_pull(p, D11_PHY_HDR_LEN);
__skb_trim(p, len_mpdu);
/* unmute transmit */
if (wlc->hw->suspended_fifos) {
hdr = (struct ieee80211_hdr *)p->data;
if (ieee80211_is_beacon(hdr->frame_control))
brcms_b_mute(wlc->hw, false);
}
memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
}
/* calculate frame duration for Mixed-mode L-SIG spoofing; return the
* number of bytes that goes in the length field
*
* Formula given by HT PHY Spec v 1.13
* len = 3 * (nsyms + nstream + 3) - 3
*/
u16
brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
uint mac_len)
{
uint nsyms, len = 0, kNdps;
if (is_mcs_rate(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
int tot_streams = (mcs_2_txstreams(mcs) + 1) +
rspec_stc(ratespec);
/*
* the payload duration calculation matches that
* of regular ofdm
*/
/* 1000Ndbps = kbps * 4 */
kNdps = mcs_2_rate(mcs, rspec_is40mhz(ratespec),
rspec_issgi(ratespec)) * 4;
if (rspec_stc(ratespec) == 0)
nsyms =
CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
APHY_TAIL_NBITS) * 1000, kNdps);
else
/* STBC needs to have even number of symbols */
nsyms =
2 *
CEIL((APHY_SERVICE_NBITS + 8 * mac_len +
APHY_TAIL_NBITS) * 1000, 2 * kNdps);
/* (+3) account for HT-SIG(2) and HT-STF(1) */
nsyms += (tot_streams + 3);
/*
* 3 bytes/symbol @ legacy 6Mbps rate
* (-3) excluding service bits and tail bits
*/
len = (3 * nsyms) - 3;
}
return (u16) len;
}
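/*
* Illustrative note (not from the original driver): a worked example of
* the calculation above, assuming the usual OFDM constants
* (APHY_SERVICE_NBITS == 16, APHY_TAIL_NBITS == 6) and MCS 0 at 20 MHz
* without SGI (6.5 Mb/s, so kNdps == 26000). For mac_len == 100 bytes and
* no STBC: nsyms = CEIL((16 + 800 + 6) * 1000, 26000) = 32, tot_streams
* == 1, so nsyms becomes 32 + (1 + 3) = 36 and the spoofed legacy length
* field is 3 * 36 - 3 = 105 bytes.
*/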
static void
brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc, uint frame_len)
{
const struct brcms_c_rateset *rs_dflt;
struct brcms_c_rateset rs;
u8 rate;
u16 entry_ptr;
u8 plcp[D11_PHY_HDR_LEN];
u16 dur, sifs;
uint i;
sifs = get_sifs(wlc->band);
rs_dflt = brcms_c_rateset_get_hwrs(wlc);
brcms_c_rateset_copy(rs_dflt, &rs);
brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
/*
* walk the phy rate table and update MAC core SHM
* basic rate table entries
*/
for (i = 0; i < rs.count; i++) {
rate = rs.rates[i] & BRCMS_RATE_MASK;
entry_ptr = brcms_b_rate_shm_offset(wlc->hw, rate);
/* Calculate the Probe Response PLCP for the given rate */
brcms_c_compute_plcp(wlc, rate, frame_len, plcp);
/*
* Calculate the duration of the Probe Response
* frame plus SIFS for the MAC
*/
dur = (u16) brcms_c_calc_frame_time(wlc, rate,
BRCMS_LONG_PREAMBLE, frame_len);
dur += sifs;
/* Update the SHM Rate Table entry Probe Response values */
brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_PLCP_POS,
(u16) (plcp[0] + (plcp[1] << 8)));
brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_PLCP_POS + 2,
(u16) (plcp[2] + (plcp[3] << 8)));
brcms_b_write_shm(wlc->hw, entry_ptr + M_RT_PRS_DUR_POS, dur);
}
}
int brcms_c_get_header_len(void)
{
return TXOFF;
}
static void brcms_c_beacon_write(struct brcms_c_info *wlc,
struct sk_buff *beacon, u16 tim_offset,
u16 dtim_period, bool bcn0, bool bcn1)
{
size_t len;
struct ieee80211_tx_info *tx_info;
struct brcms_hardware *wlc_hw = wlc->hw;
struct ieee80211_hw *ieee_hw = brcms_c_pub(wlc)->ieee_hw;
/* Get tx_info */
tx_info = IEEE80211_SKB_CB(beacon);
len = min_t(size_t, beacon->len, BCN_TMPL_LEN);
wlc->bcn_rspec = ieee80211_get_tx_rate(ieee_hw, tx_info)->hw_value;
brcms_c_compute_plcp(wlc, wlc->bcn_rspec,
len + FCS_LEN - D11_PHY_HDR_LEN, beacon->data);
/* "Regular" and 16 MBSS but not for 4 MBSS */
/* Update the phytxctl for the beacon based on the rspec */
brcms_c_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
if (bcn0) {
/* write the beacon into the template region */
brcms_b_write_template_ram(wlc_hw, T_BCN0_TPL_BASE,
(len + 3) & ~3, beacon->data);
/* write beacon length to SCR */
brcms_b_write_shm(wlc_hw, M_BCN0_FRM_BYTESZ, (u16) len);
}
if (bcn1) {
/* write the beacon into the template region */
brcms_b_write_template_ram(wlc_hw, T_BCN1_TPL_BASE,
(len + 3) & ~3, beacon->data);
/* write beacon length to SCR */
brcms_b_write_shm(wlc_hw, M_BCN1_FRM_BYTESZ, (u16) len);
}
if (tim_offset != 0) {
brcms_b_write_shm(wlc_hw, M_TIMBPOS_INBEACON,
tim_offset + D11B_PHY_HDR_LEN);
brcms_b_write_shm(wlc_hw, M_DOT11_DTIMPERIOD, dtim_period);
} else {
brcms_b_write_shm(wlc_hw, M_TIMBPOS_INBEACON,
len + D11B_PHY_HDR_LEN);
brcms_b_write_shm(wlc_hw, M_DOT11_DTIMPERIOD, 0);
}
}
static void brcms_c_update_beacon_hw(struct brcms_c_info *wlc,
struct sk_buff *beacon, u16 tim_offset,
u16 dtim_period)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
/* Hardware beaconing for this config */
u32 both_valid = MCMD_BCN0VLD | MCMD_BCN1VLD;
/* Check if both templates are in use, if so sched. an interrupt
* that will call back into this routine
*/
if ((bcma_read32(core, D11REGOFFS(maccommand)) & both_valid) == both_valid)
/* clear any previous status */
bcma_write32(core, D11REGOFFS(macintstatus), MI_BCNTPL);
if (wlc->beacon_template_virgin) {
wlc->beacon_template_virgin = false;
brcms_c_beacon_write(wlc, beacon, tim_offset, dtim_period, true,
true);
/* mark beacon0 valid */
bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN0VLD);
return;
}
/* Check that after scheduling the interrupt both of the
* templates are still busy. if not clear the int. & remask
*/
if ((bcma_read32(core, D11REGOFFS(maccommand)) & both_valid) == both_valid) {
wlc->defmacintmask |= MI_BCNTPL;
return;
}
if (!(bcma_read32(core, D11REGOFFS(maccommand)) & MCMD_BCN0VLD)) {
brcms_c_beacon_write(wlc, beacon, tim_offset, dtim_period, true,
false);
/* mark beacon0 valid */
bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN0VLD);
return;
}
if (!(bcma_read32(core, D11REGOFFS(maccommand)) & MCMD_BCN1VLD)) {
brcms_c_beacon_write(wlc, beacon, tim_offset, dtim_period,
false, true);
/* mark beacon1 valid */
bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN1VLD);
return;
}
return;
}
/*
* Update all beacons for the system.
*/
void brcms_c_update_beacon(struct brcms_c_info *wlc)
{
struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
if (wlc->pub->up && (bsscfg->type == BRCMS_TYPE_AP ||
bsscfg->type == BRCMS_TYPE_ADHOC)) {
/* Clear the soft intmask */
wlc->defmacintmask &= ~MI_BCNTPL;
if (!wlc->beacon)
return;
brcms_c_update_beacon_hw(wlc, wlc->beacon,
wlc->beacon_tim_offset,
wlc->beacon_dtim_period);
}
}
void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
u16 tim_offset, u16 dtim_period)
{
if (!beacon)
return;
if (wlc->beacon)
dev_kfree_skb_any(wlc->beacon);
wlc->beacon = beacon;
/* add PLCP */
skb_push(wlc->beacon, D11_PHY_HDR_LEN);
wlc->beacon_tim_offset = tim_offset;
wlc->beacon_dtim_period = dtim_period;
brcms_c_update_beacon(wlc);
}
void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
struct sk_buff *probe_resp)
{
if (!probe_resp)
return;
if (wlc->probe_resp)
dev_kfree_skb_any(wlc->probe_resp);
wlc->probe_resp = probe_resp;
/* add PLCP */
skb_push(wlc->probe_resp, D11_PHY_HDR_LEN);
brcms_c_update_probe_resp(wlc, false);
}
void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable)
{
/*
* prevent ucode from sending probe responses by setting the timeout
* to 1; it cannot send one in that time frame.
*/
wlc->prb_resp_timeout = enable ? BRCMS_PRB_RESP_TIMEOUT : 1;
brcms_b_write_shm(wlc->hw, M_PRS_MAXTIME, wlc->prb_resp_timeout);
/* TODO: if (enable) => also deactivate receiving of probe request */
}
/* Write ssid into shared memory */
static void
brcms_c_shm_ssid_upd(struct brcms_c_info *wlc, struct brcms_bss_cfg *cfg)
{
u8 *ssidptr = cfg->SSID;
u16 base = M_SSID;
u8 ssidbuf[IEEE80211_MAX_SSID_LEN];
/* padding the ssid with zero and copy it into shm */
memset(ssidbuf, 0, IEEE80211_MAX_SSID_LEN);
memcpy(ssidbuf, ssidptr, cfg->SSID_len);
brcms_c_copyto_shm(wlc, base, ssidbuf, IEEE80211_MAX_SSID_LEN);
brcms_b_write_shm(wlc->hw, M_SSIDLEN, (u16) cfg->SSID_len);
}
static void
brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
struct brcms_bss_cfg *cfg,
struct sk_buff *probe_resp,
bool suspend)
{
int len;
len = min_t(size_t, probe_resp->len, BCN_TMPL_LEN);
if (suspend)
brcms_c_suspend_mac_and_wait(wlc);
/* write the probe response into the template region */
brcms_b_write_template_ram(wlc->hw, T_PRS_TPL_BASE,
(len + 3) & ~3, probe_resp->data);
/* write the length of the probe response frame (+PLCP/-FCS) */
brcms_b_write_shm(wlc->hw, M_PRB_RESP_FRM_LEN, (u16) len);
/* write the SSID and SSID length */
brcms_c_shm_ssid_upd(wlc, cfg);
/*
* Write PLCP headers and durations for probe response frames
* at all rates. Use the actual frame length covered by the
* PLCP header for the call to brcms_c_mod_prb_rsp_rate_table()
* by subtracting the PLCP len and adding the FCS.
*/
brcms_c_mod_prb_rsp_rate_table(wlc,
(u16)len + FCS_LEN - D11_PHY_HDR_LEN);
if (suspend)
brcms_c_enable_mac(wlc);
}
void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
{
struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
/* update AP or IBSS probe responses */
if (wlc->pub->up && (bsscfg->type == BRCMS_TYPE_AP ||
bsscfg->type == BRCMS_TYPE_ADHOC)) {
if (!wlc->probe_resp)
return;
brcms_c_bss_update_probe_resp(wlc, bsscfg, wlc->probe_resp,
suspend);
}
}
int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
uint *blocks)
{
if (fifo >= NFIFO)
return -EINVAL;
*blocks = wlc_hw->xmtfifo_sz[fifo];
return 0;
}
void
brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
const u8 *addr)
{
brcms_b_set_addrmatch(wlc->hw, match_reg_offset, addr);
if (match_reg_offset == RCM_BSSID_OFFSET)
memcpy(wlc->bsscfg->BSSID, addr, ETH_ALEN);
}
/*
* Flag 'scan in progress' to withhold dynamic phy calibration
*/
void brcms_c_scan_start(struct brcms_c_info *wlc)
{
wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
}
void brcms_c_scan_stop(struct brcms_c_info *wlc)
{
wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
}
void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state)
{
wlc->pub->associated = state;
}
/*
* When a remote STA/AP is removed by mac80211, or when it can no longer accept
* AMPDU traffic, packets pending in hardware have to be invalidated so that
* they can be handled appropriately when the hardware later releases them.
*/
void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
struct ieee80211_sta *sta,
void (*dma_callback_fn))
{
struct dma_pub *dmah;
int i;
for (i = 0; i < NFIFO; i++) {
dmah = hw->di[i];
if (dmah != NULL)
dma_walk_packets(dmah, dma_callback_fn, sta);
}
}
int brcms_c_get_curband(struct brcms_c_info *wlc)
{
return wlc->band->bandunit;
}
bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
int i;
/* Kick DMA to send any pending AMPDU */
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
dma_kick_tx(wlc->hw->di[i]);
return !brcms_txpktpendtot(wlc);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
{
wlc->bcn_li_bcn = interval;
if (wlc->pub->up)
brcms_c_bcn_li_upd(wlc);
}
u64 brcms_c_tsf_get(struct brcms_c_info *wlc)
{
u32 tsf_h, tsf_l;
u64 tsf;
brcms_b_read_tsf(wlc->hw, &tsf_l, &tsf_h);
tsf = tsf_h;
tsf <<= 32;
tsf |= tsf_l;
return tsf;
}
void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf)
{
u32 tsf_h, tsf_l;
brcms_c_time_lock(wlc);
tsf_l = tsf;
tsf_h = (tsf >> 32);
/* write the tsf timer low, then high */
bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_timerlow), tsf_l);
bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_timerhigh), tsf_h);
brcms_c_time_unlock(wlc);
}
int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr)
{
uint qdbm;
/* Remove override bit and clip to max qdbm value */
qdbm = min_t(uint, txpwr * BRCMS_TXPWR_DB_FACTOR, 0xff);
return wlc_phy_txpower_set(wlc->band->pi, qdbm, false);
}
int brcms_c_get_tx_power(struct brcms_c_info *wlc)
{
uint qdbm;
bool override;
wlc_phy_txpower_get(wlc->band->pi, &qdbm, &override);
/* Return qdbm units */
return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR);
}
/* Process a received frame */
static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
{
struct d11rxhdr *rxh;
struct ieee80211_hdr *h;
uint len;
bool is_amsdu;
/* frame starts with rxhdr */
rxh = (struct d11rxhdr *) (p->data);
/* strip off rxhdr */
skb_pull(p, BRCMS_HWRXOFF);
/* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
if (rxh->RxStatus1 & RXS_PBPRES) {
if (p->len < 2) {
brcms_err(wlc->hw->d11core,
"wl%d: recv: rcvd runt of len %d\n",
wlc->pub->unit, p->len);
goto toss;
}
skb_pull(p, 2);
}
h = (struct ieee80211_hdr *)(p->data + D11_PHY_HDR_LEN);
len = p->len;
if (rxh->RxStatus1 & RXS_FCSERR) {
if (!(wlc->filter_flags & FIF_FCSFAIL))
goto toss;
}
/* check received pkt has at least frame control field */
if (len < D11_PHY_HDR_LEN + sizeof(h->frame_control))
goto toss;
/* not supporting A-MSDU */
is_amsdu = rxh->RxStatus2 & RXS_AMSDU_MASK;
if (is_amsdu)
goto toss;
brcms_c_recvctl(wlc, rxh, p);
return;
toss:
brcmu_pkt_buf_free_skb(p);
}
/* Process received frames */
/*
* Return true if more frames need to be processed. false otherwise.
* Param 'bound' indicates max. # frames to process before break out.
*/
static bool
brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
{
struct sk_buff *p;
struct sk_buff *next = NULL;
struct sk_buff_head recv_frames;
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
bool morepending = false;
skb_queue_head_init(&recv_frames);
/* gather received frames */
do {
/* !give others some time to run! */
if (n >= bound_limit)
break;
morepending = dma_rx(wlc_hw->di[fifo], &recv_frames);
n++;
} while (morepending);
/* post more rbufs */
dma_rxfill(wlc_hw->di[fifo]);
/* process each frame */
skb_queue_walk_safe(&recv_frames, p, next) {
struct d11rxhdr_le *rxh_le;
struct d11rxhdr *rxh;
skb_unlink(p, &recv_frames);
rxh_le = (struct d11rxhdr_le *)p->data;
rxh = (struct d11rxhdr *)p->data;
/* fixup rx header endianness */
rxh->RxFrameSize = le16_to_cpu(rxh_le->RxFrameSize);
rxh->PhyRxStatus_0 = le16_to_cpu(rxh_le->PhyRxStatus_0);
rxh->PhyRxStatus_1 = le16_to_cpu(rxh_le->PhyRxStatus_1);
rxh->PhyRxStatus_2 = le16_to_cpu(rxh_le->PhyRxStatus_2);
rxh->PhyRxStatus_3 = le16_to_cpu(rxh_le->PhyRxStatus_3);
rxh->PhyRxStatus_4 = le16_to_cpu(rxh_le->PhyRxStatus_4);
rxh->PhyRxStatus_5 = le16_to_cpu(rxh_le->PhyRxStatus_5);
rxh->RxStatus1 = le16_to_cpu(rxh_le->RxStatus1);
rxh->RxStatus2 = le16_to_cpu(rxh_le->RxStatus2);
rxh->RxTSFTime = le16_to_cpu(rxh_le->RxTSFTime);
rxh->RxChan = le16_to_cpu(rxh_le->RxChan);
brcms_c_recv(wlc_hw->wlc, p);
}
return morepending;
}
/* second-level interrupt processing
* Return true if another dpc needs to be re-scheduled. false otherwise.
* Param 'bounded' indicates if applicable loops should be bounded.
*/
bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
{
u32 macintstatus;
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
if (brcms_deviceremoved(wlc)) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return false;
}
/* grab and clear the saved software intstatus bits */
macintstatus = wlc->macintstatus;
wlc->macintstatus = 0;
brcms_dbg_int(core, "wl%d: macintstatus 0x%x\n",
wlc_hw->unit, macintstatus);
WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */
/* tx status */
if (macintstatus & MI_TFS) {
bool fatal;
if (brcms_b_txstatus(wlc->hw, bounded, &fatal))
wlc->macintstatus |= MI_TFS;
if (fatal) {
brcms_err(core, "MI_TFS: fatal\n");
goto fatal;
}
}
if (macintstatus & (MI_TBTT | MI_DTIM_TBTT))
brcms_c_tbtt(wlc);
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
brcms_dbg_info(core, "end of ATIM window\n");
bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
wlc->qvalid = 0;
}
/*
* received data or control frame, MI_DMAINT is
* indication of RX_FIFO interrupt
*/
if (macintstatus & MI_DMAINT)
if (brcms_b_recv(wlc_hw, RX_FIFO, bounded))
wlc->macintstatus |= MI_DMAINT;
/* noise sample collected */
if (macintstatus & MI_BG_NOISE)
wlc_phy_noise_sample_intr(wlc_hw->band->pi);
if (macintstatus & MI_GP0) {
brcms_err(core, "wl%d: PSM microcode watchdog fired at %d "
"(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
__func__, ai_get_chip_id(wlc_hw->sih),
ai_get_chiprev(wlc_hw->sih));
brcms_fatal_error(wlc_hw->wlc->wl);
}
/* gptimer timeout */
if (macintstatus & MI_TO)
bcma_write32(core, D11REGOFFS(gptimer), 0);
if (macintstatus & MI_RFDISABLE) {
brcms_dbg_info(core, "wl%d: BMAC Detected a change on the"
" RF Disable Input\n", wlc_hw->unit);
brcms_rfkill_set_hw_state(wlc->wl);
}
/* BCN template is available */
if (macintstatus & MI_BCNTPL)
brcms_c_update_beacon(wlc);
/* it isn't done and needs to be resched if macintstatus is non-zero */
return wlc->macintstatus != 0;
fatal:
brcms_fatal_error(wlc_hw->wlc->wl);
return wlc->macintstatus != 0;
}
void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
{
struct bcma_device *core = wlc->hw->d11core;
struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan;
u16 chanspec;
brcms_dbg_info(core, "wl%d\n", wlc->pub->unit);
chanspec = ch20mhz_chspec(ch->hw_value);
brcms_b_init(wlc->hw, chanspec);
/* update beacon listen interval */
brcms_c_bcn_li_upd(wlc);
/* write ethernet address to core */
brcms_c_set_mac(wlc->bsscfg);
brcms_c_set_bssid(wlc->bsscfg);
/* Update tsf_cfprep if associated and up */
if (wlc->pub->associated && wlc->pub->up) {
u32 bi;
/* get beacon period and convert to uS */
bi = wlc->bsscfg->current_bss->beacon_period << 10;
/*
* update since init path would reset
* to default value
*/
bcma_write32(core, D11REGOFFS(tsf_cfprep),
bi << CFPREP_CBI_SHIFT);
/* Update maccontrol PM related bits */
brcms_c_set_ps_ctrl(wlc);
}
brcms_c_bandinit_ordered(wlc, chanspec);
/* init probe response timeout */
brcms_b_write_shm(wlc->hw, M_PRS_MAXTIME, wlc->prb_resp_timeout);
/* init max burst txop (framebursting) */
brcms_b_write_shm(wlc->hw, M_MBURST_TXOP,
(wlc->_rifs ? (EDCF_AC_VO_TXOP_AP << 5) : MAXFRAMEBURST_TXOP));
/* initialize maximum allowed duty cycle */
brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_ofdm, true, true);
brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_cck, false, true);
/*
* Update some shared memory locations related to
* max AMPDU size allowed to be received
*/
brcms_c_ampdu_shm_upd(wlc->ampdu);
/* band-specific inits */
brcms_c_bsinit(wlc);
/* Enable EDCF mode (while the MAC is suspended) */
bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
brcms_c_edcf_setparams(wlc, false);
/* read the ucode version if we have not yet done so */
if (wlc->ucode_rev == 0) {
u16 rev;
u16 patch;
rev = brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR);
patch = brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR);
wlc->ucode_rev = (rev << NBITS(u16)) | patch;
snprintf(wlc->wiphy->fw_version,
sizeof(wlc->wiphy->fw_version), "%u.%u", rev, patch);
}
/* ..now really unleash hell (allow the MAC out of suspend) */
brcms_c_enable_mac(wlc);
/* suspend the tx fifos and mute the phy for preism cac time */
if (mute_tx)
brcms_b_mute(wlc->hw, true);
/* enable the RF Disable Delay timer */
bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
/*
* Initialize WME parameters; if they haven't been set by some other
* mechanism (IOVar, etc) then read them from the hardware.
*/
if (GFIELD(wlc->wme_retries[0], EDCF_SHORT) == 0) {
/* Uninitialized; read from HW */
int ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
wlc->wme_retries[ac] =
brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
}
}
/*
* The common driver entry routine. Error codes should be unique
*/
struct brcms_c_info *
brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
bool piomode, uint *perr)
{
struct brcms_c_info *wlc;
uint err = 0;
uint i, j;
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
wlc = brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
pub = wlc->pub;
#if defined(DEBUG)
wlc_info_dbg = wlc;
#endif
wlc->band = wlc->bandstate[0];
wlc->core = wlc->corestate;
wlc->wl = wl;
pub->unit = unit;
pub->_piomode = piomode;
wlc->bandinit_pending = false;
wlc->beacon_template_virgin = true;
/* populate struct brcms_c_info with default values */
brcms_c_info_init(wlc, unit);
/* update sta/ap related parameters */
brcms_c_ap_upd(wlc);
/*
* low level attach steps (all hw accesses go
* inside, no more in the rest of the attach)
*/
err = brcms_b_attach(wlc, core, unit, piomode);
if (err)
goto fail;
brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, OFF);
pub->phy_11ncapable = BRCMS_PHY_11N_CAP(wlc->band);
/* disable allowed duty cycle */
wlc->tx_duty_cycle_ofdm = 0;
wlc->tx_duty_cycle_cck = 0;
brcms_c_stf_phy_chain_calc(wlc);
/* txchain 1: txant 0, txchain 2: txant 1 */
if (BRCMS_ISNPHY(wlc->band) && (wlc->stf->txstreams == 1))
wlc->stf->txant = wlc->stf->hw_txchain - 1;
/* push to BMAC driver */
wlc_phy_stf_chain_init(wlc->band->pi, wlc->stf->hw_txchain,
wlc->stf->hw_rxchain);
/* pull up some info resulting from the low attach */
for (i = 0; i < NFIFO; i++)
wlc->core->txavail[i] = wlc->hw->txavail[i];
memcpy(&wlc->perm_etheraddr, &wlc->hw->etheraddr, ETH_ALEN);
memcpy(&pub->cur_etheraddr, &wlc->hw->etheraddr, ETH_ALEN);
for (j = 0; j < wlc->pub->_nbands; j++) {
wlc->band = wlc->bandstate[j];
if (!brcms_c_attach_stf_ant_init(wlc)) {
err = 24;
goto fail;
}
/* default contention windows size limits */
wlc->band->CWmin = APHY_CWMIN;
wlc->band->CWmax = PHY_CWMAX;
/* init gmode value */
if (wlc->band->bandtype == BRCM_BAND_2G) {
wlc->band->gmode = GMODE_AUTO;
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER,
wlc->band->gmode);
}
/* init _n_enab supported mode */
if (BRCMS_PHY_11N_CAP(wlc->band)) {
pub->_n_enab = SUPPORT_11N;
brcms_c_protection_upd(wlc, BRCMS_PROT_N_USER,
((pub->_n_enab ==
SUPPORT_11N) ? WL_11N_2x2 :
WL_11N_3x3));
}
/* init per-band default rateset, depend on band->gmode */
brcms_default_rateset(wlc, &wlc->band->defrateset);
/* fill in hw_rateset */
brcms_c_rateset_filter(&wlc->band->defrateset,
&wlc->band->hw_rateset, false,
BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
(bool) (wlc->pub->_n_enab & SUPPORT_11N));
}
/*
* update antenna config due to
* wlc->stf->txant/txchain/ant_rx_ovr change
*/
brcms_c_stf_phy_txant_upd(wlc);
/* attach each module */
err = brcms_c_attach_module(wlc);
if (err != 0)
goto fail;
if (!brcms_c_timers_init(wlc, unit)) {
wiphy_err(wl->wiphy, "wl%d: %s: init_timer failed\n", unit,
__func__);
err = 32;
goto fail;
}
/* depend on rateset, gmode */
wlc->cmi = brcms_c_channel_mgr_attach(wlc);
if (!wlc->cmi) {
wiphy_err(wl->wiphy, "wl%d: %s: channel_mgr_attach failed"
"\n", unit, __func__);
err = 33;
goto fail;
}
/* init default when all parameters are ready, i.e. ->rateset */
brcms_c_bss_default_init(wlc);
/*
* Complete the wlc default state initializations..
*/
wlc->bsscfg->wlc = wlc;
wlc->mimoft = FT_HT;
wlc->mimo_40txbw = AUTO;
wlc->ofdm_40txbw = AUTO;
wlc->cck_40txbw = AUTO;
brcms_c_update_mimo_band_bwcap(wlc, BRCMS_N_BW_20IN2G_40IN5G);
/* Set default values of SGI */
if (BRCMS_SGI_CAP_PHY(wlc)) {
brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
BRCMS_N_SGI_40));
} else if (BRCMS_ISSSLPNPHY(wlc->band)) {
brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
BRCMS_N_SGI_40));
} else {
brcms_c_ht_update_sgi_rx(wlc, 0);
}
brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
if (perr)
*perr = 0;
return wlc;
fail:
wiphy_err(wl->wiphy, "wl%d: %s: failed with err %d\n",
unit, __func__, err);
if (wlc)
brcms_c_detach(wlc);
if (perr)
*perr = err;
return NULL;
}
| gpl-2.0 |
tchaari/android_kernel_samsung_crespo_kitkang | drivers/net/wan/lapbether.c | 2378 | 10300 | /*
* "LAPB via ethernet" driver release 001
*
* This code REQUIRES 2.1.15 or higher/ NET3.038
*
* This module:
* This module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This is a "pseudo" network driver to allow LAPB over Ethernet.
*
* This driver can use any ethernet destination address, and can be
* limited to accept frames from one dedicated ethernet card only.
*
* History
* LAPBETH 001 Jonathan Naylor Cloned from bpqether.c
* 2000-10-29 Henner Eisen lapb_data_indication() return status.
* 2000-11-14 Henner Eisen dev_hold/put, NETDEV_GOING_DOWN support
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/stat.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/lapb.h>
#include <linux/init.h>
#include <net/x25device.h>
static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
/* If this number is made larger, check that the temporary string buffer
* in lapbeth_new_device is large enough to store the probe device name.*/
#define MAXLAPBDEV 100
struct lapbethdev {
struct list_head node;
struct net_device *ethdev; /* link to ethernet device */
struct net_device *axdev; /* lapbeth device (lapb#) */
};
static LIST_HEAD(lapbeth_devices);
/* ------------------------------------------------------------------------ */
/*
* Get the LAPB device for the ethernet device
*/
static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
if (lapbeth->ethdev == dev)
return lapbeth;
}
return NULL;
}
static __inline__ int dev_is_ethdev(struct net_device *dev)
{
return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
}
/* ------------------------------------------------------------------------ */
/*
* Receive a LAPB frame via an ethernet interface.
*/
static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
{
int len, err;
struct lapbethdev *lapbeth;
if (dev_net(dev) != &init_net)
goto drop;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
return NET_RX_DROP;
if (!pskb_may_pull(skb, 2))
goto drop;
rcu_read_lock();
lapbeth = lapbeth_get_x25_dev(dev);
if (!lapbeth)
goto drop_unlock;
if (!netif_running(lapbeth->axdev))
goto drop_unlock;
len = skb->data[0] + skb->data[1] * 256;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
skb_pull(skb, 2); /* Remove the length bytes */
skb_trim(skb, len); /* Set the length of the data */
if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) {
printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err);
goto drop_unlock;
}
out:
rcu_read_unlock();
return 0;
drop_unlock:
kfree_skb(skb);
goto out;
drop:
kfree_skb(skb);
return 0;
}
static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
skb_push(skb, 1);
if (skb_cow(skb, 1))
return NET_RX_DROP;
ptr = skb->data;
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
}
/*
* Send a LAPB frame via an ethernet interface
*/
static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
struct net_device *dev)
{
int err;
/*
* Just to be *really* sure not to send anything if the interface
* is down; the ethernet device may have gone away.
*/
if (!netif_running(dev))
goto drop;
switch (skb->data[0]) {
case X25_IFACE_DATA:
break;
case X25_IFACE_CONNECT:
if ((err = lapb_connect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_connect_request "
"error: %d\n", err);
goto drop;
case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_disconnect_request "
"err: %d\n", err);
/* Fall thru */
default:
goto drop;
}
skb_pull(skb, 1);
if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
goto drop;
}
out:
return NETDEV_TX_OK;
drop:
kfree_skb(skb);
goto out;
}
static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
{
struct lapbethdev *lapbeth = netdev_priv(ndev);
unsigned char *ptr;
struct net_device *dev;
int size = skb->len;
skb->protocol = htons(ETH_P_X25);
ptr = skb_push(skb, 2);
*ptr++ = size % 256;
*ptr++ = size / 256;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += size;
skb->dev = dev = lapbeth->ethdev;
dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
dev_queue_xmit(skb);
}
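/*
* Illustrative note (not from the original driver): the two bytes pushed
* above form a little-endian length pseudo-header that the receive path
* (lapbeth_rcv) decodes as data[0] + data[1] * 256. For example, a
* 300 byte LAPB frame is prefixed with 0x2c, 0x01 (44 + 1 * 256 == 300)
* before being handed to dev_queue_xmit().
*/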
static void lapbeth_connected(struct net_device *dev, int reason)
{
unsigned char *ptr;
struct sk_buff *skb = dev_alloc_skb(1);
if (!skb) {
printk(KERN_ERR "lapbeth: out of memory\n");
return;
}
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
}
static void lapbeth_disconnected(struct net_device *dev, int reason)
{
unsigned char *ptr;
struct sk_buff *skb = dev_alloc_skb(1);
if (!skb) {
printk(KERN_ERR "lapbeth: out of memory\n");
return;
}
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
}
/*
* Set the device hardware (MAC) address
*/
static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
return 0;
}
static struct lapb_register_struct lapbeth_callbacks = {
.connect_confirmation = lapbeth_connected,
.connect_indication = lapbeth_connected,
.disconnect_confirmation = lapbeth_disconnected,
.disconnect_indication = lapbeth_disconnected,
.data_indication = lapbeth_data_indication,
.data_transmit = lapbeth_data_transmit,
};
/*
* open/close a device
*/
static int lapbeth_open(struct net_device *dev)
{
int err;
if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
printk(KERN_ERR "lapbeth: lapb_register error - %d\n", err);
return -ENODEV;
}
netif_start_queue(dev);
return 0;
}
static int lapbeth_close(struct net_device *dev)
{
int err;
netif_stop_queue(dev);
if ((err = lapb_unregister(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_unregister error - %d\n", err);
return 0;
}
/* ------------------------------------------------------------------------ */
static const struct net_device_ops lapbeth_netdev_ops = {
.ndo_open = lapbeth_open,
.ndo_stop = lapbeth_close,
.ndo_start_xmit = lapbeth_xmit,
.ndo_set_mac_address = lapbeth_set_mac_address,
};
static void lapbeth_setup(struct net_device *dev)
{
dev->netdev_ops = &lapbeth_netdev_ops;
dev->destructor = free_netdev;
dev->type = ARPHRD_X25;
dev->hard_header_len = 3;
dev->mtu = 1000;
dev->addr_len = 0;
}
/*
* Setup a new device.
*/
static int lapbeth_new_device(struct net_device *dev)
{
struct net_device *ndev;
struct lapbethdev *lapbeth;
int rc = -ENOMEM;
ASSERT_RTNL();
ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d",
lapbeth_setup);
if (!ndev)
goto out;
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
dev_hold(dev);
lapbeth->ethdev = dev;
rc = -EIO;
if (register_netdevice(ndev))
goto fail;
list_add_rcu(&lapbeth->node, &lapbeth_devices);
rc = 0;
out:
return rc;
fail:
dev_put(dev);
free_netdev(ndev);
kfree(lapbeth);
goto out;
}
/*
* Free a lapb network device.
*/
static void lapbeth_free_device(struct lapbethdev *lapbeth)
{
dev_put(lapbeth->ethdev);
list_del_rcu(&lapbeth->node);
unregister_netdevice(lapbeth->axdev);
}
/*
* Handle device status changes.
*
* Called from notifier with RTNL held.
*/
static int lapbeth_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct lapbethdev *lapbeth;
struct net_device *dev = ptr;
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
if (!dev_is_ethdev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
/* New ethernet device -> new LAPB interface */
if (lapbeth_get_x25_dev(dev) == NULL)
lapbeth_new_device(dev);
break;
case NETDEV_DOWN:
/* ethernet device closed -> close LAPB interface */
lapbeth = lapbeth_get_x25_dev(dev);
if (lapbeth)
dev_close(lapbeth->axdev);
break;
case NETDEV_UNREGISTER:
/* ethernet device disappears -> remove LAPB interface */
lapbeth = lapbeth_get_x25_dev(dev);
if (lapbeth)
lapbeth_free_device(lapbeth);
break;
}
return NOTIFY_DONE;
}
/* ------------------------------------------------------------------------ */
static struct packet_type lapbeth_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_DEC),
.func = lapbeth_rcv,
};
static struct notifier_block lapbeth_dev_notifier = {
.notifier_call = lapbeth_device_event,
};
static const char banner[] __initconst =
KERN_INFO "LAPB Ethernet driver version 0.02\n";
static int __init lapbeth_init_driver(void)
{
dev_add_pack(&lapbeth_packet_type);
register_netdevice_notifier(&lapbeth_dev_notifier);
printk(banner);
return 0;
}
module_init(lapbeth_init_driver);
static void __exit lapbeth_cleanup_driver(void)
{
struct lapbethdev *lapbeth;
struct list_head *entry, *tmp;
dev_remove_pack(&lapbeth_packet_type);
unregister_netdevice_notifier(&lapbeth_dev_notifier);
rtnl_lock();
list_for_each_safe(entry, tmp, &lapbeth_devices) {
lapbeth = list_entry(entry, struct lapbethdev, node);
dev_put(lapbeth->ethdev);
unregister_netdevice(lapbeth->axdev);
}
rtnl_unlock();
}
module_exit(lapbeth_cleanup_driver);
MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
VanirAOSP/kernel_samsung_skomer | net/batman-adv/vis.c | 2378 | 26429 | /*
* Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*
*/
#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "vis.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"
#define MAX_VIS_PACKET_SIZE 1000
/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
/* Checks if a sequence number x is a predecessor/successor of y.
* they handle overflows/underflows and can correctly check for a
* predecessor/successor unless the variable sequence number has grown by
* more than 2**(bitwidth(x)-1)-1.
* This means that for a uint8_t with the maximum value 255, it would think:
* - when adding nothing - it is neither a predecessor nor a successor
* - before adding more than 127 to the starting value - it is a predecessor,
* - when adding 128 - it is neither a predecessor nor a successor,
* - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
static void start_vis_timer(struct bat_priv *bat_priv);
/* free the info */
static void free_info(struct kref *ref)
{
struct vis_info *info = container_of(ref, struct vis_info, refcount);
struct bat_priv *bat_priv = info->bat_priv;
struct recvlist_node *entry, *tmp;
list_del_init(&info->send_list);
spin_lock_bh(&bat_priv->vis_list_lock);
list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
list_del(&entry->list);
kfree(entry);
}
spin_unlock_bh(&bat_priv->vis_list_lock);
kfree_skb(info->skb_packet);
kfree(info);
}
/* Compare two vis packets, used by the hashing algorithm */
static int vis_info_cmp(struct hlist_node *node, void *data2)
{
struct vis_info *d1, *d2;
struct vis_packet *p1, *p2;
d1 = container_of(node, struct vis_info, hash_entry);
d2 = data2;
p1 = (struct vis_packet *)d1->skb_packet->data;
p2 = (struct vis_packet *)d2->skb_packet->data;
return compare_eth(p1->vis_orig, p2->vis_orig);
}
/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
static int vis_info_choose(void *data, int size)
{
struct vis_info *vis_info = data;
struct vis_packet *packet;
unsigned char *key;
uint32_t hash = 0;
size_t i;
packet = (struct vis_packet *)vis_info->skb_packet->data;
key = packet->vis_orig;
for (i = 0; i < ETH_ALEN; i++) {
hash += key[i];
hash += (hash << 10);
hash ^= (hash >> 6);
}
hash += (hash << 3);
hash ^= (hash >> 11);
hash += (hash << 15);
return hash % size;
}
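/* Editor's note: an illustrative, hedged sketch, not part of the original
 * code. It re-runs the same one-at-a-time style mixing used by
 * vis_info_choose() above in user space, over a hypothetical originator MAC,
 * to show how a bucket index is picked; the MAC, the helper name and the
 * bucket count are assumptions made only for this example. */
#include <stdint.h>
#include <stdio.h>
static uint32_t sketch_bucket_for_mac(const unsigned char *mac, int size)
{
uint32_t hash = 0;
int i;
for (i = 0; i < 6; i++) { /* ETH_ALEN bytes */
hash += mac[i];
hash += (hash << 10);
hash ^= (hash >> 6);
}
hash += (hash << 3);
hash ^= (hash >> 11);
hash += (hash << 15);
return hash % size;
}
int main(void)
{
const unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
/* 256 mirrors the hash_new(256) used later in vis_init() */
printf("bucket = %u\n", sketch_bucket_for_mac(mac, 256));
return 0;
}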
static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
void *data)
{
struct hashtable_t *hash = bat_priv->vis_hash;
struct hlist_head *head;
struct hlist_node *node;
struct vis_info *vis_info, *vis_info_tmp = NULL;
int index;
if (!hash)
return NULL;
index = vis_info_choose(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
if (!vis_info_cmp(node, data))
continue;
vis_info_tmp = vis_info;
break;
}
rcu_read_unlock();
return vis_info_tmp;
}
/* insert an interface into the list of interfaces of one originator, if it
 * does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
struct hlist_head *if_list,
bool primary)
{
struct if_list_entry *entry;
struct hlist_node *pos;
hlist_for_each_entry(entry, pos, if_list, list) {
if (compare_eth(entry->addr, (void *)interface))
return;
}
/* it's a new address, add it to the list */
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return;
memcpy(entry->addr, interface, ETH_ALEN);
entry->primary = primary;
hlist_add_head(&entry->list, if_list);
}
static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
struct if_list_entry *entry;
struct hlist_node *pos;
size_t len = 0;
hlist_for_each_entry(entry, pos, if_list, list) {
if (entry->primary)
len += sprintf(buff + len, "PRIMARY, ");
else
len += sprintf(buff + len, "SEC %pM, ", entry->addr);
}
return len;
}
static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
{
struct if_list_entry *entry;
struct hlist_node *pos;
size_t count = 0;
hlist_for_each_entry(entry, pos, if_list, list) {
if (entry->primary)
count += 9; /* length of "PRIMARY, " */
else
count += 23; /* length of "SEC " + 17-char MAC + ", " */
}
return count;
}
/* read an entry */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
uint8_t *src, bool primary)
{
/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
if (primary && entry->quality == 0)
return sprintf(buff, "TT %pM, ", entry->dest);
else if (compare_eth(entry->src, src))
return sprintf(buff, "TQ %pM %d, ", entry->dest,
entry->quality);
return 0;
}
int vis_seq_print_text(struct seq_file *seq, void *offset)
{
struct hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
struct vis_info *info;
struct vis_packet *packet;
struct vis_info_entry *entries;
struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hashtable_t *hash = bat_priv->vis_hash;
HLIST_HEAD(vis_if_list);
struct if_list_entry *entry;
struct hlist_node *pos, *n;
int i, j, ret = 0;
int vis_server = atomic_read(&bat_priv->vis_mode);
size_t buff_pos, buf_size;
char *buff;
int compare;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
if (vis_server == VIS_TYPE_CLIENT_UPDATE)
goto out;
buf_size = 1;
/* Estimate length */
spin_lock_bh(&bat_priv->vis_hash_lock);
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(info, node, head, hash_entry) {
packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
((char *)packet + sizeof(struct vis_packet));
for (j = 0; j < packet->entries; j++) {
if (entries[j].quality == 0)
continue;
compare =
compare_eth(entries[j].src, packet->vis_orig);
vis_data_insert_interface(entries[j].src,
&vis_if_list,
compare);
}
hlist_for_each_entry(entry, pos, &vis_if_list, list) {
buf_size += 18 + 26 * packet->entries;
/* add primary/secondary records */
if (compare_eth(entry->addr, packet->vis_orig))
buf_size +=
vis_data_count_prim_sec(&vis_if_list);
buf_size += 1;
}
hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
list) {
hlist_del(&entry->list);
kfree(entry);
}
}
rcu_read_unlock();
}
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
spin_unlock_bh(&bat_priv->vis_hash_lock);
ret = -ENOMEM;
goto out;
}
buff[0] = '\0';
buff_pos = 0;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(info, node, head, hash_entry) {
packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
((char *)packet + sizeof(struct vis_packet));
for (j = 0; j < packet->entries; j++) {
if (entries[j].quality == 0)
continue;
compare =
compare_eth(entries[j].src, packet->vis_orig);
vis_data_insert_interface(entries[j].src,
&vis_if_list,
compare);
}
hlist_for_each_entry(entry, pos, &vis_if_list, list) {
buff_pos += sprintf(buff + buff_pos, "%pM,",
entry->addr);
for (j = 0; j < packet->entries; j++)
buff_pos += vis_data_read_entry(
buff + buff_pos,
&entries[j],
entry->addr,
entry->primary);
/* add primary/secondary records */
if (compare_eth(entry->addr, packet->vis_orig))
buff_pos +=
vis_data_read_prim_sec(buff + buff_pos,
&vis_if_list);
buff_pos += sprintf(buff + buff_pos, "\n");
}
hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
list) {
hlist_del(&entry->list);
kfree(entry);
}
}
rcu_read_unlock();
}
spin_unlock_bh(&bat_priv->vis_hash_lock);
seq_printf(seq, "%s", buff);
kfree(buff);
out:
if (primary_if)
hardif_free_ref(primary_if);
return ret;
}
/* add the info packet to the send list, if it was not
* already linked in. */
static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
if (list_empty(&info->send_list)) {
kref_get(&info->refcount);
list_add_tail(&info->send_list, &bat_priv->vis_send_list);
}
}
/* delete the info packet from the send list, if it was
* linked in. */
static void send_list_del(struct vis_info *info)
{
if (!list_empty(&info->send_list)) {
list_del_init(&info->send_list);
kref_put(&info->refcount, free_info);
}
}
/* tries to add one entry to the receive list. */
static void recv_list_add(struct bat_priv *bat_priv,
struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
if (!entry)
return;
memcpy(entry->mac, mac, ETH_ALEN);
spin_lock_bh(&bat_priv->vis_list_lock);
list_add_tail(&entry->list, recv_list);
spin_unlock_bh(&bat_priv->vis_list_lock);
}
/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct bat_priv *bat_priv,
struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
spin_lock_bh(&bat_priv->vis_list_lock);
list_for_each_entry(entry, recv_list, list) {
if (compare_eth(entry->mac, mac)) {
spin_unlock_bh(&bat_priv->vis_list_lock);
return 1;
}
}
spin_unlock_bh(&bat_priv->vis_list_lock);
return 0;
}
/* try to add the packet to the vis_hash. Returns NULL if invalid (e.g. too
 * old or broken). The vis hash must be locked outside. is_new is set when the
 * packet is newer than old entries in the hash. */
static struct vis_info *add_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
int vis_info_len, int *is_new,
int make_broadcast)
{
struct vis_info *info, *old_info;
struct vis_packet *search_packet, *old_packet;
struct vis_info search_elem;
struct vis_packet *packet;
int hash_added;
*is_new = 0;
/* sanity check */
if (!bat_priv->vis_hash)
return NULL;
/* see if the packet is already in vis_hash */
search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
if (!search_elem.skb_packet)
return NULL;
search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
sizeof(struct vis_packet));
memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
old_info = vis_hash_find(bat_priv, &search_elem);
kfree_skb(search_elem.skb_packet);
if (old_info) {
old_packet = (struct vis_packet *)old_info->skb_packet->data;
if (!seq_after(ntohl(vis_packet->seqno),
ntohl(old_packet->seqno))) {
if (old_packet->seqno == vis_packet->seqno) {
recv_list_add(bat_priv, &old_info->recv_list,
vis_packet->sender_orig);
return old_info;
} else {
/* newer packet is already in hash. */
return NULL;
}
}
/* remove old entry */
hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
old_info);
send_list_del(old_info);
kref_put(&old_info->refcount, free_info);
}
info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
if (!info)
return NULL;
info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
vis_info_len + sizeof(struct ethhdr));
if (!info->skb_packet) {
kfree(info);
return NULL;
}
skb_reserve(info->skb_packet, sizeof(struct ethhdr));
packet = (struct vis_packet *)skb_put(info->skb_packet,
sizeof(struct vis_packet) +
vis_info_len);
kref_init(&info->refcount);
INIT_LIST_HEAD(&info->send_list);
INIT_LIST_HEAD(&info->recv_list);
info->first_seen = jiffies;
info->bat_priv = bat_priv;
memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);
/* initialize and add new packet. */
*is_new = 1;
/* Make it a broadcast packet, if required */
if (make_broadcast)
memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
/* repair if the entry count claims more data than the packet holds. */
if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
packet->entries = vis_info_len / sizeof(struct vis_info_entry);
recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
/* try to add it */
hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
info, &info->hash_entry);
if (hash_added < 0) {
/* did not work (for some reason) */
kref_put(&info->refcount, free_info);
info = NULL;
}
return info;
}
/* handle the server sync packet, forward if needed. */
void receive_server_sync_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
int vis_info_len)
{
struct vis_info *info;
int is_new, make_broadcast;
int vis_server = atomic_read(&bat_priv->vis_mode);
make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
spin_lock_bh(&bat_priv->vis_hash_lock);
info = add_packet(bat_priv, vis_packet, vis_info_len,
&is_new, make_broadcast);
if (!info)
goto end;
/* only if we are a server ourselves and the packet is newer than the
 * one in the hash. */
if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
send_list_add(bat_priv, info);
end:
spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* handle an incoming client update packet and schedule forward if needed. */
void receive_client_update_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
int vis_info_len)
{
struct vis_info *info;
struct vis_packet *packet;
int is_new;
int vis_server = atomic_read(&bat_priv->vis_mode);
int are_target = 0;
/* clients shall not broadcast. */
if (is_broadcast_ether_addr(vis_packet->target_orig))
return;
/* Are we the target for this VIS packet? */
if (vis_server == VIS_TYPE_SERVER_SYNC &&
is_my_mac(vis_packet->target_orig))
are_target = 1;
spin_lock_bh(&bat_priv->vis_hash_lock);
info = add_packet(bat_priv, vis_packet, vis_info_len,
&is_new, are_target);
if (!info)
goto end;
/* note that outdated packets will be dropped at this point. */
packet = (struct vis_packet *)info->skb_packet->data;
/* send only if we're the target server or ... */
if (are_target && is_new) {
packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
send_list_add(bat_priv, info);
/* ... we're not the recipient (and thus need to forward). */
} else if (!is_my_mac(packet->target_orig)) {
send_list_add(bat_priv, info);
}
end:
spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
* address to its address and return the best_tq.
*
* Must be called with the originator hash locked */
static int find_best_vis_server(struct bat_priv *bat_priv,
struct vis_info *info)
{
struct hashtable_t *hash = bat_priv->orig_hash;
struct neigh_node *router;
struct hlist_node *node;
struct hlist_head *head;
struct orig_node *orig_node;
struct vis_packet *packet;
int best_tq = -1, i;
packet = (struct vis_packet *)info->skb_packet->data;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
router = orig_node_get_router(orig_node);
if (!router)
continue;
if ((orig_node->flags & VIS_SERVER) &&
(router->tq_avg > best_tq)) {
best_tq = router->tq_avg;
memcpy(packet->target_orig, orig_node->orig,
ETH_ALEN);
}
neigh_node_free_ref(router);
}
rcu_read_unlock();
}
return best_tq;
}
/* Return true if the vis packet is full. */
static bool vis_packet_full(struct vis_info *info)
{
struct vis_packet *packet;
packet = (struct vis_packet *)info->skb_packet->data;
if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
< packet->entries + 1)
return true;
return false;
}
/* generates a packet of own vis data,
* returns 0 on success, -1 if no packet could be generated */
static int generate_vis_packet(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
struct orig_node *orig_node;
struct neigh_node *router;
struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
struct vis_info_entry *entry;
struct tt_local_entry *tt_local_entry;
int best_tq = -1, i;
info->first_seen = jiffies;
packet->vis_type = atomic_read(&bat_priv->vis_mode);
memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
packet->ttl = TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
packet->entries = 0;
skb_trim(info->skb_packet, sizeof(struct vis_packet));
if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
best_tq = find_best_vis_server(bat_priv, info);
if (best_tq < 0)
return -1;
}
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
router = orig_node_get_router(orig_node);
if (!router)
continue;
if (!compare_eth(router->addr, orig_node->orig))
goto next;
if (router->if_incoming->if_status != IF_ACTIVE)
goto next;
if (router->tq_avg < 1)
goto next;
/* fill one entry into buffer. */
entry = (struct vis_info_entry *)
skb_put(info->skb_packet, sizeof(*entry));
memcpy(entry->src,
router->if_incoming->net_dev->dev_addr,
ETH_ALEN);
memcpy(entry->dest, orig_node->orig, ETH_ALEN);
entry->quality = router->tq_avg;
packet->entries++;
next:
neigh_node_free_ref(router);
if (vis_packet_full(info))
goto unlock;
}
rcu_read_unlock();
}
hash = bat_priv->tt_local_hash;
spin_lock_bh(&bat_priv->tt_lhash_lock);
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
entry = (struct vis_info_entry *)
skb_put(info->skb_packet,
sizeof(*entry));
memset(entry->src, 0, ETH_ALEN);
memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
entry->quality = 0; /* 0 means TT */
packet->entries++;
if (vis_packet_full(info)) {
spin_unlock_bh(&bat_priv->tt_lhash_lock);
return 0;
}
}
}
spin_unlock_bh(&bat_priv->tt_lhash_lock);
return 0;
unlock:
rcu_read_unlock();
return 0;
}
/* free old vis packets. Must be called with the vis_hash_lock
 * held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
int i;
struct hashtable_t *hash = bat_priv->vis_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
struct vis_info *info;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
hlist_for_each_entry_safe(info, node, node_tmp,
head, hash_entry) {
/* never purge own data. */
if (info == bat_priv->my_vis_info)
continue;
if (time_after(jiffies,
info->first_seen + VIS_TIMEOUT * HZ)) {
hlist_del(node);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
}
}
}
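/* Editor's note: an illustrative, hedged sketch, not part of the original
 * code. purge_vis_packets() above relies on the kernel's time_after()
 * jiffies comparison; the standalone helper below mimics that wraparound-safe
 * idiom with plain unsigned long counters. The name and sample values are
 * assumptions made only for this example. */
#include <stdio.h>
#define sketch_time_after(a, b) ((long)((b) - (a)) < 0)
int main(void)
{
unsigned long now = 5; /* counter has wrapped past ULONG_MAX */
unsigned long expiry = (unsigned long)-10; /* set shortly before the wrap */
/* still reports the expiry as passed, despite the wraparound */
printf("expired = %d\n", sketch_time_after(now, expiry));
return 0;
}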
static void broadcast_vis_packet(struct bat_priv *bat_priv,
struct vis_info *info)
{
struct neigh_node *router;
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
struct orig_node *orig_node;
struct vis_packet *packet;
struct sk_buff *skb;
struct hard_iface *hard_iface;
uint8_t dstaddr[ETH_ALEN];
int i;
packet = (struct vis_packet *)info->skb_packet->data;
/* send to all routers in range. */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
/* if it's a vis server and reachable, send it. */
if (!(orig_node->flags & VIS_SERVER))
continue;
router = orig_node_get_router(orig_node);
if (!router)
continue;
/* don't send it if we already received the packet from
* this node. */
if (recv_list_is_in(bat_priv, &info->recv_list,
orig_node->orig)) {
neigh_node_free_ref(router);
continue;
}
memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
hard_iface = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
neigh_node_free_ref(router);
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
send_skb_packet(skb, hard_iface, dstaddr);
}
rcu_read_unlock();
}
}
static void unicast_vis_packet(struct bat_priv *bat_priv,
struct vis_info *info)
{
struct orig_node *orig_node;
struct neigh_node *router = NULL;
struct sk_buff *skb;
struct vis_packet *packet;
packet = (struct vis_packet *)info->skb_packet->data;
orig_node = orig_hash_find(bat_priv, packet->target_orig);
if (!orig_node)
goto out;
router = orig_node_get_router(orig_node);
if (!router)
goto out;
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
send_skb_packet(skb, router->if_incoming, router->addr);
out:
if (router)
neigh_node_free_ref(router);
if (orig_node)
orig_node_free_ref(orig_node);
}
/* only send one vis packet. called from send_vis_packets() */
static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
struct hard_iface *primary_if;
struct vis_packet *packet;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
packet = (struct vis_packet *)info->skb_packet->data;
if (packet->ttl < 2) {
pr_debug("Error - can't send vis packet: ttl exceeded\n");
goto out;
}
memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
packet->ttl--;
if (is_broadcast_ether_addr(packet->target_orig))
broadcast_vis_packet(bat_priv, info);
else
unicast_vis_packet(bat_priv, info);
packet->ttl++; /* restore TTL */
out:
if (primary_if)
hardif_free_ref(primary_if);
}
/* called from timer; send (and maybe generate) vis packet. */
static void send_vis_packets(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
struct bat_priv *bat_priv =
container_of(delayed_work, struct bat_priv, vis_work);
struct vis_info *info;
spin_lock_bh(&bat_priv->vis_hash_lock);
purge_vis_packets(bat_priv);
if (generate_vis_packet(bat_priv) == 0) {
/* schedule if generation was successful */
send_list_add(bat_priv, bat_priv->my_vis_info);
}
while (!list_empty(&bat_priv->vis_send_list)) {
info = list_first_entry(&bat_priv->vis_send_list,
typeof(*info), send_list);
kref_get(&info->refcount);
spin_unlock_bh(&bat_priv->vis_hash_lock);
send_vis_packet(bat_priv, info);
spin_lock_bh(&bat_priv->vis_hash_lock);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
spin_unlock_bh(&bat_priv->vis_hash_lock);
start_vis_timer(bat_priv);
}
/* init the vis server. this may only be called when if_list is already
* initialized (e.g. bat0 is initialized, interfaces have been added) */
int vis_init(struct bat_priv *bat_priv)
{
struct vis_packet *packet;
int hash_added;
if (bat_priv->vis_hash)
return 1;
spin_lock_bh(&bat_priv->vis_hash_lock);
bat_priv->vis_hash = hash_new(256);
if (!bat_priv->vis_hash) {
pr_err("Can't initialize vis_hash\n");
goto err;
}
bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
if (!bat_priv->my_vis_info) {
pr_err("Can't initialize vis packet\n");
goto err;
}
bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
sizeof(struct vis_packet) +
MAX_VIS_PACKET_SIZE +
sizeof(struct ethhdr));
if (!bat_priv->my_vis_info->skb_packet)
goto free_info;
skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
packet = (struct vis_packet *)skb_put(
bat_priv->my_vis_info->skb_packet,
sizeof(struct vis_packet));
/* prefill the vis info */
bat_priv->my_vis_info->first_seen = jiffies -
msecs_to_jiffies(VIS_INTERVAL);
INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
kref_init(&bat_priv->my_vis_info->refcount);
bat_priv->my_vis_info->bat_priv = bat_priv;
packet->version = COMPAT_VERSION;
packet->packet_type = BAT_VIS;
packet->ttl = TTL;
packet->seqno = 0;
packet->entries = 0;
INIT_LIST_HEAD(&bat_priv->vis_send_list);
hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
bat_priv->my_vis_info,
&bat_priv->my_vis_info->hash_entry);
if (hash_added < 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
kref_put(&bat_priv->my_vis_info->refcount, free_info);
goto err;
}
spin_unlock_bh(&bat_priv->vis_hash_lock);
start_vis_timer(bat_priv);
return 1;
free_info:
kfree(bat_priv->my_vis_info);
bat_priv->my_vis_info = NULL;
err:
spin_unlock_bh(&bat_priv->vis_hash_lock);
vis_quit(bat_priv);
return 0;
}
/* Decrease the reference count on a hash item info */
static void free_info_ref(struct hlist_node *node, void *arg)
{
struct vis_info *info;
info = container_of(node, struct vis_info, hash_entry);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
/* shutdown vis-server */
void vis_quit(struct bat_priv *bat_priv)
{
if (!bat_priv->vis_hash)
return;
cancel_delayed_work_sync(&bat_priv->vis_work);
spin_lock_bh(&bat_priv->vis_hash_lock);
/* properly remove, kill timers ... */
hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
bat_priv->vis_hash = NULL;
bat_priv->my_vis_info = NULL;
spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* schedule packets for (re)transmission */
static void start_vis_timer(struct bat_priv *bat_priv)
{
INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
msecs_to_jiffies(VIS_INTERVAL));
}
| gpl-2.0 |
satgass/android_kernel_lenovo_msm8916 | drivers/gpu/drm/radeon/atom.c | 2634 | 35288 | /*
* Copyright 2008 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Stanislaw Skowronek
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#define ATOM_DEBUG
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "radeon.h"
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
#define ATOM_COND_ALWAYS 2
#define ATOM_COND_BELOW 3
#define ATOM_COND_BELOWOREQUAL 4
#define ATOM_COND_EQUAL 5
#define ATOM_COND_NOTEQUAL 6
#define ATOM_PORT_ATI 0
#define ATOM_PORT_PCI 1
#define ATOM_PORT_SYSIO 2
#define ATOM_UNIT_MICROSEC 0
#define ATOM_UNIT_MILLISEC 1
#define PLL_INDEX 2
#define PLL_DATA 3
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
int ps_shift;
uint16_t start;
unsigned last_jump;
unsigned long last_jump_jiffies;
bool abort;
} atom_exec_context;
int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
0xFF000000 };
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
/* translate destination alignment field to the source alignment encoding */
{0, 0, 0, 0},
{1, 2, 3, 0},
{1, 2, 3, 0},
{1, 2, 3, 0},
{4, 5, 6, 7},
{4, 5, 6, 7},
{4, 5, 6, 7},
{4, 5, 6, 7},
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
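/* Editor's note: an illustrative, hedged sketch, not part of the original
 * driver. It shows how the atom_arg_mask[]/atom_arg_shift[] tables above
 * isolate a sub-field of a 32-bit value for a given source alignment (here
 * index 2, i.e. ATOM_SRC_WORD8, bits [23:8]); the register value is a
 * made-up example. */
#include <stdint.h>
#include <stdio.h>
int main(void)
{
static const uint32_t mask[8] = { 0xFFFFFFFF, 0xFFFF, 0xFFFF00,
0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 0xFF000000 };
static const int shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
uint32_t reg = 0x12345678;
int align = 2; /* ATOM_SRC_WORD8 */
uint32_t val = (reg & mask[align]) >> shift[align];
printf("WORD8 field of 0x%08X = 0x%04X\n", reg, val); /* prints 0x3456 */
return 0;
}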
static int debug_depth = 0;
#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
while (n--)
printk(" ");
}
#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
uint32_t index, uint32_t data)
{
struct radeon_device *rdev = ctx->card->dev->dev_private;
uint32_t temp = 0xCDCDCDCD;
while (1)
switch (CU8(base)) {
case ATOM_IIO_NOP:
base++;
break;
case ATOM_IIO_READ:
temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
base += 3;
break;
case ATOM_IIO_WRITE:
if (rdev->family == CHIP_RV515)
(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case ATOM_IIO_CLEAR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 2));
base += 3;
break;
case ATOM_IIO_SET:
temp |=
(0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
2);
base += 3;
break;
case ATOM_IIO_MOVE_INDEX:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 3));
temp |=
((index >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
3);
base += 4;
break;
case ATOM_IIO_MOVE_DATA:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 3));
temp |=
((data >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
3);
base += 4;
break;
case ATOM_IIO_MOVE_ATTR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 3));
temp |= ((ctx->io_attr >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
base += 4;
break;
case ATOM_IIO_END:
return temp;
default:
printk(KERN_INFO "Unknown IIO opcode.\n");
return 0;
}
}
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
int *ptr, uint32_t *saved, int print)
{
uint32_t idx, val = 0xCDCDCDCD, align, arg;
struct atom_context *gctx = ctx->ctx;
arg = attr & 7;
align = (attr >> 3) & 7;
switch (arg) {
case ATOM_ARG_REG:
idx = U16(*ptr);
(*ptr) += 2;
if (print)
DEBUG("REG[0x%04X]", idx);
idx += gctx->reg_block;
switch (gctx->io_mode) {
case ATOM_IO_MM:
val = gctx->card->reg_read(gctx->card, idx);
break;
case ATOM_IO_PCI:
printk(KERN_INFO
"PCI registers are not implemented.\n");
return 0;
case ATOM_IO_SYSIO:
printk(KERN_INFO
"SYSIO registers are not implemented.\n");
return 0;
default:
if (!(gctx->io_mode & 0x80)) {
printk(KERN_INFO "Bad IO mode.\n");
return 0;
}
if (!gctx->iio[gctx->io_mode & 0x7F]) {
printk(KERN_INFO
"Undefined indirect IO read method %d.\n",
gctx->io_mode & 0x7F);
return 0;
}
val =
atom_iio_execute(gctx,
gctx->iio[gctx->io_mode & 0x7F],
idx, 0);
}
break;
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
case ATOM_ARG_WS:
idx = U8(*ptr);
(*ptr)++;
if (print)
DEBUG("WS[0x%02X]", idx);
switch (idx) {
case ATOM_WS_QUOTIENT:
val = gctx->divmul[0];
break;
case ATOM_WS_REMAINDER:
val = gctx->divmul[1];
break;
case ATOM_WS_DATAPTR:
val = gctx->data_block;
break;
case ATOM_WS_SHIFT:
val = gctx->shift;
break;
case ATOM_WS_OR_MASK:
val = 1 << gctx->shift;
break;
case ATOM_WS_AND_MASK:
val = ~(1 << gctx->shift);
break;
case ATOM_WS_FB_WINDOW:
val = gctx->fb_base;
break;
case ATOM_WS_ATTRIBUTES:
val = gctx->io_attr;
break;
case ATOM_WS_REGPTR:
val = gctx->reg_block;
break;
default:
val = ctx->ws[idx];
}
break;
case ATOM_ARG_ID:
idx = U16(*ptr);
(*ptr) += 2;
if (print) {
if (gctx->data_block)
DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
else
DEBUG("ID[0x%04X]", idx);
}
val = U32(idx + gctx->data_block);
break;
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
val = 0;
} else
val = gctx->scratch[(gctx->fb_base / 4) + idx];
if (print)
DEBUG("FB[0x%02X]", idx);
break;
case ATOM_ARG_IMM:
switch (align) {
case ATOM_SRC_DWORD:
val = U32(*ptr);
(*ptr) += 4;
if (print)
DEBUG("IMM 0x%08X\n", val);
return val;
case ATOM_SRC_WORD0:
case ATOM_SRC_WORD8:
case ATOM_SRC_WORD16:
val = U16(*ptr);
(*ptr) += 2;
if (print)
DEBUG("IMM 0x%04X\n", val);
return val;
case ATOM_SRC_BYTE0:
case ATOM_SRC_BYTE8:
case ATOM_SRC_BYTE16:
case ATOM_SRC_BYTE24:
val = U8(*ptr);
(*ptr)++;
if (print)
DEBUG("IMM 0x%02X\n", val);
return val;
}
return 0;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
if (print)
DEBUG("PLL[0x%02X]", idx);
val = gctx->card->pll_read(gctx->card, idx);
break;
case ATOM_ARG_MC:
idx = U8(*ptr);
(*ptr)++;
if (print)
DEBUG("MC[0x%02X]", idx);
val = gctx->card->mc_read(gctx->card, idx);
break;
}
if (saved)
*saved = val;
val &= atom_arg_mask[align];
val >>= atom_arg_shift[align];
if (print)
switch (align) {
case ATOM_SRC_DWORD:
DEBUG(".[31:0] -> 0x%08X\n", val);
break;
case ATOM_SRC_WORD0:
DEBUG(".[15:0] -> 0x%04X\n", val);
break;
case ATOM_SRC_WORD8:
DEBUG(".[23:8] -> 0x%04X\n", val);
break;
case ATOM_SRC_WORD16:
DEBUG(".[31:16] -> 0x%04X\n", val);
break;
case ATOM_SRC_BYTE0:
DEBUG(".[7:0] -> 0x%02X\n", val);
break;
case ATOM_SRC_BYTE8:
DEBUG(".[15:8] -> 0x%02X\n", val);
break;
case ATOM_SRC_BYTE16:
DEBUG(".[23:16] -> 0x%02X\n", val);
break;
case ATOM_SRC_BYTE24:
DEBUG(".[31:24] -> 0x%02X\n", val);
break;
}
return val;
}
static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
uint32_t align = (attr >> 3) & 7, arg = attr & 7;
switch (arg) {
case ATOM_ARG_REG:
case ATOM_ARG_ID:
(*ptr) += 2;
break;
case ATOM_ARG_PLL:
case ATOM_ARG_MC:
case ATOM_ARG_PS:
case ATOM_ARG_WS:
case ATOM_ARG_FB:
(*ptr)++;
break;
case ATOM_ARG_IMM:
switch (align) {
case ATOM_SRC_DWORD:
(*ptr) += 4;
return;
case ATOM_SRC_WORD0:
case ATOM_SRC_WORD8:
case ATOM_SRC_WORD16:
(*ptr) += 2;
return;
case ATOM_SRC_BYTE0:
case ATOM_SRC_BYTE8:
case ATOM_SRC_BYTE16:
case ATOM_SRC_BYTE24:
(*ptr)++;
return;
}
return;
}
}
static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
uint32_t val = 0xCDCDCDCD;
switch (align) {
case ATOM_SRC_DWORD:
val = U32(*ptr);
(*ptr) += 4;
break;
case ATOM_SRC_WORD0:
case ATOM_SRC_WORD8:
case ATOM_SRC_WORD16:
val = U16(*ptr);
(*ptr) += 2;
break;
case ATOM_SRC_BYTE0:
case ATOM_SRC_BYTE8:
case ATOM_SRC_BYTE16:
case ATOM_SRC_BYTE24:
val = U8(*ptr);
(*ptr)++;
break;
}
return val;
}
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
int *ptr, uint32_t *saved, int print)
{
return atom_get_src_int(ctx,
arg | atom_dst_to_src[(attr >> 3) &
7][(attr >> 6) & 3] << 3,
ptr, saved, print);
}
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
atom_skip_src_int(ctx,
arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
3] << 3, ptr);
}
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
int *ptr, uint32_t val, uint32_t saved)
{
uint32_t align =
atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
val, idx;
struct atom_context *gctx = ctx->ctx;
old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
val <<= atom_arg_shift[align];
val &= atom_arg_mask[align];
saved &= ~atom_arg_mask[align];
val |= saved;
switch (arg) {
case ATOM_ARG_REG:
idx = U16(*ptr);
(*ptr) += 2;
DEBUG("REG[0x%04X]", idx);
idx += gctx->reg_block;
switch (gctx->io_mode) {
case ATOM_IO_MM:
if (idx == 0)
gctx->card->reg_write(gctx->card, idx,
val << 2);
else
gctx->card->reg_write(gctx->card, idx, val);
break;
case ATOM_IO_PCI:
printk(KERN_INFO
"PCI registers are not implemented.\n");
return;
case ATOM_IO_SYSIO:
printk(KERN_INFO
"SYSIO registers are not implemented.\n");
return;
default:
if (!(gctx->io_mode & 0x80)) {
printk(KERN_INFO "Bad IO mode.\n");
return;
}
if (!gctx->iio[gctx->io_mode & 0xFF]) {
printk(KERN_INFO
"Undefined indirect IO write method %d.\n",
gctx->io_mode & 0x7F);
return;
}
atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
idx, val);
}
break;
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
DEBUG("PS[0x%02X]", idx);
ctx->ps[idx] = cpu_to_le32(val);
break;
case ATOM_ARG_WS:
idx = U8(*ptr);
(*ptr)++;
DEBUG("WS[0x%02X]", idx);
switch (idx) {
case ATOM_WS_QUOTIENT:
gctx->divmul[0] = val;
break;
case ATOM_WS_REMAINDER:
gctx->divmul[1] = val;
break;
case ATOM_WS_DATAPTR:
gctx->data_block = val;
break;
case ATOM_WS_SHIFT:
gctx->shift = val;
break;
case ATOM_WS_OR_MASK:
case ATOM_WS_AND_MASK:
break;
case ATOM_WS_FB_WINDOW:
gctx->fb_base = val;
break;
case ATOM_WS_ATTRIBUTES:
gctx->io_attr = val;
break;
case ATOM_WS_REGPTR:
gctx->reg_block = val;
break;
default:
ctx->ws[idx] = val;
}
break;
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
} else
gctx->scratch[(gctx->fb_base / 4) + idx] = val;
DEBUG("FB[0x%02X]", idx);
break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
DEBUG("PLL[0x%02X]", idx);
gctx->card->pll_write(gctx->card, idx, val);
break;
case ATOM_ARG_MC:
idx = U8(*ptr);
(*ptr)++;
DEBUG("MC[0x%02X]", idx);
gctx->card->mc_write(gctx->card, idx, val);
return;
}
switch (align) {
case ATOM_SRC_DWORD:
DEBUG(".[31:0] <- 0x%08X\n", old_val);
break;
case ATOM_SRC_WORD0:
DEBUG(".[15:0] <- 0x%04X\n", old_val);
break;
case ATOM_SRC_WORD8:
DEBUG(".[23:8] <- 0x%04X\n", old_val);
break;
case ATOM_SRC_WORD16:
DEBUG(".[31:16] <- 0x%04X\n", old_val);
break;
case ATOM_SRC_BYTE0:
DEBUG(".[7:0] <- 0x%02X\n", old_val);
break;
case ATOM_SRC_BYTE8:
DEBUG(".[15:8] <- 0x%02X\n", old_val);
break;
case ATOM_SRC_BYTE16:
DEBUG(".[23:16] <- 0x%02X\n", old_val);
break;
case ATOM_SRC_BYTE24:
DEBUG(".[31:24] <- 0x%02X\n", old_val);
break;
}
}
static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst += src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst &= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
printk("ATOM BIOS beeped!\n");
}
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
int idx = U8((*ptr)++);
int r = 0;
if (idx < ATOM_TABLE_NAMES_CNT)
SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
if (r) {
ctx->abort = true;
}
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t saved;
int dptr = *ptr;
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}
static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
ctx->ctx->cs_equal = (dst == src);
ctx->ctx->cs_above = (dst > src);
SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
ctx->ctx->cs_above ? "GT" : "LE");
}
static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
unsigned count = U8((*ptr)++);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
udelay(count);
else if (!drm_can_sleep())
mdelay(count);
else
msleep(count);
}
static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
if (src != 0) {
ctx->ctx->divmul[0] = dst / src;
ctx->ctx->divmul[1] = dst % src;
} else {
ctx->ctx->divmul[0] = 0;
ctx->ctx->divmul[1] = 0;
}
}
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
/* functionally, a nop */
}
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
int execute = 0, target = U16(*ptr);
unsigned long cjiffies;
(*ptr) += 2;
switch (arg) {
case ATOM_COND_ABOVE:
execute = ctx->ctx->cs_above;
break;
case ATOM_COND_ABOVEOREQUAL:
execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
break;
case ATOM_COND_ALWAYS:
execute = 1;
break;
case ATOM_COND_BELOW:
execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
break;
case ATOM_COND_BELOWOREQUAL:
execute = !ctx->ctx->cs_above;
break;
case ATOM_COND_EQUAL:
execute = ctx->ctx->cs_equal;
break;
case ATOM_COND_NOTEQUAL:
execute = !ctx->ctx->cs_equal;
break;
}
if (arg != ATOM_COND_ALWAYS)
SDEBUG(" taken: %s\n", execute ? "yes" : "no");
SDEBUG(" target: 0x%04X\n", target);
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
if ((jiffies_to_msecs(cjiffies) > 5000)) {
DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
ctx->abort = true;
}
} else {
/* jiffies wrap around we will just wait a little longer */
ctx->last_jump_jiffies = jiffies;
}
} else {
ctx->last_jump = ctx->start + target;
ctx->last_jump_jiffies = jiffies;
}
*ptr = ctx->start + target;
}
}
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, mask, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
SDEBUG(" mask: 0x%08x", mask);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst &= mask;
dst |= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t src, saved;
int dptr = *ptr;
if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
else {
atom_skip_dst(ctx, arg, attr, ptr);
saved = 0xCDCDCDCD;
}
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}
static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
ctx->ctx->divmul[0] = dst * src;
}
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
/* nothing */
}
static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst |= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t val = U8((*ptr)++);
SDEBUG("POST card output: 0x%02X\n", val);
}
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
printk(KERN_INFO "unimplemented!\n");
}
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
printk(KERN_INFO "unimplemented!\n");
}
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
printk(KERN_INFO "unimplemented!\n");
}
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
int idx = U8(*ptr);
(*ptr)++;
SDEBUG(" block: %d\n", idx);
if (!idx)
ctx->ctx->data_block = 0;
else if (idx == 255)
ctx->ctx->data_block = ctx->start;
else
ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
SDEBUG(" fb_base: ");
ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}
static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
int port;
switch (arg) {
case ATOM_PORT_ATI:
port = U16(*ptr);
if (port < ATOM_IO_NAMES_CNT)
SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
else
SDEBUG(" port: %d\n", port);
if (!port)
ctx->ctx->io_mode = ATOM_IO_MM;
else
ctx->ctx->io_mode = ATOM_IO_IIO | port;
(*ptr) += 2;
break;
case ATOM_PORT_PCI:
ctx->ctx->io_mode = ATOM_IO_PCI;
(*ptr)++;
break;
case ATOM_PORT_SYSIO:
ctx->ctx->io_mode = ATOM_IO_SYSIO;
(*ptr)++;
break;
}
}
static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
ctx->ctx->reg_block = U16(*ptr);
(*ptr) += 2;
SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs the full dst value */
dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
dst &= atom_arg_mask[dst_align];
dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs the full dst value */
dst = saved;
shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
dst &= atom_arg_mask[dst_align];
dst >>= atom_arg_shift[dst_align];
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst -= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t src, val, target;
SDEBUG(" switch: ");
src = atom_get_src(ctx, attr, ptr);
while (U16(*ptr) != ATOM_CASE_END)
if (U8(*ptr) == ATOM_CASE_MAGIC) {
(*ptr)++;
SDEBUG(" case: ");
val =
atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
ptr);
target = U16(*ptr);
if (val == src) {
SDEBUG(" target: %04X\n", target);
*ptr = ctx->start + target;
return;
}
(*ptr) += 2;
} else {
printk(KERN_INFO "Bad case.\n");
return;
}
(*ptr) += 2;
}
static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
ctx->ctx->cs_equal = ((dst & src) == 0);
SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}
static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst ^= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
printk(KERN_INFO "unimplemented!\n");
}
static struct {
void (*func) (atom_exec_context *, int *, int);
int arg;
} opcode_table[ATOM_OP_CNT] = {
{
NULL, 0}, {
atom_op_move, ATOM_ARG_REG}, {
atom_op_move, ATOM_ARG_PS}, {
atom_op_move, ATOM_ARG_WS}, {
atom_op_move, ATOM_ARG_FB}, {
atom_op_move, ATOM_ARG_PLL}, {
atom_op_move, ATOM_ARG_MC}, {
atom_op_and, ATOM_ARG_REG}, {
atom_op_and, ATOM_ARG_PS}, {
atom_op_and, ATOM_ARG_WS}, {
atom_op_and, ATOM_ARG_FB}, {
atom_op_and, ATOM_ARG_PLL}, {
atom_op_and, ATOM_ARG_MC}, {
atom_op_or, ATOM_ARG_REG}, {
atom_op_or, ATOM_ARG_PS}, {
atom_op_or, ATOM_ARG_WS}, {
atom_op_or, ATOM_ARG_FB}, {
atom_op_or, ATOM_ARG_PLL}, {
atom_op_or, ATOM_ARG_MC}, {
atom_op_shift_left, ATOM_ARG_REG}, {
atom_op_shift_left, ATOM_ARG_PS}, {
atom_op_shift_left, ATOM_ARG_WS}, {
atom_op_shift_left, ATOM_ARG_FB}, {
atom_op_shift_left, ATOM_ARG_PLL}, {
atom_op_shift_left, ATOM_ARG_MC}, {
atom_op_shift_right, ATOM_ARG_REG}, {
atom_op_shift_right, ATOM_ARG_PS}, {
atom_op_shift_right, ATOM_ARG_WS}, {
atom_op_shift_right, ATOM_ARG_FB}, {
atom_op_shift_right, ATOM_ARG_PLL}, {
atom_op_shift_right, ATOM_ARG_MC}, {
atom_op_mul, ATOM_ARG_REG}, {
atom_op_mul, ATOM_ARG_PS}, {
atom_op_mul, ATOM_ARG_WS}, {
atom_op_mul, ATOM_ARG_FB}, {
atom_op_mul, ATOM_ARG_PLL}, {
atom_op_mul, ATOM_ARG_MC}, {
atom_op_div, ATOM_ARG_REG}, {
atom_op_div, ATOM_ARG_PS}, {
atom_op_div, ATOM_ARG_WS}, {
atom_op_div, ATOM_ARG_FB}, {
atom_op_div, ATOM_ARG_PLL}, {
atom_op_div, ATOM_ARG_MC}, {
atom_op_add, ATOM_ARG_REG}, {
atom_op_add, ATOM_ARG_PS}, {
atom_op_add, ATOM_ARG_WS}, {
atom_op_add, ATOM_ARG_FB}, {
atom_op_add, ATOM_ARG_PLL}, {
atom_op_add, ATOM_ARG_MC}, {
atom_op_sub, ATOM_ARG_REG}, {
atom_op_sub, ATOM_ARG_PS}, {
atom_op_sub, ATOM_ARG_WS}, {
atom_op_sub, ATOM_ARG_FB}, {
atom_op_sub, ATOM_ARG_PLL}, {
atom_op_sub, ATOM_ARG_MC}, {
atom_op_setport, ATOM_PORT_ATI}, {
atom_op_setport, ATOM_PORT_PCI}, {
atom_op_setport, ATOM_PORT_SYSIO}, {
atom_op_setregblock, 0}, {
atom_op_setfbbase, 0}, {
atom_op_compare, ATOM_ARG_REG}, {
atom_op_compare, ATOM_ARG_PS}, {
atom_op_compare, ATOM_ARG_WS}, {
atom_op_compare, ATOM_ARG_FB}, {
atom_op_compare, ATOM_ARG_PLL}, {
atom_op_compare, ATOM_ARG_MC}, {
atom_op_switch, 0}, {
atom_op_jump, ATOM_COND_ALWAYS}, {
atom_op_jump, ATOM_COND_EQUAL}, {
atom_op_jump, ATOM_COND_BELOW}, {
atom_op_jump, ATOM_COND_ABOVE}, {
atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
atom_op_jump, ATOM_COND_NOTEQUAL}, {
atom_op_test, ATOM_ARG_REG}, {
atom_op_test, ATOM_ARG_PS}, {
atom_op_test, ATOM_ARG_WS}, {
atom_op_test, ATOM_ARG_FB}, {
atom_op_test, ATOM_ARG_PLL}, {
atom_op_test, ATOM_ARG_MC}, {
atom_op_delay, ATOM_UNIT_MILLISEC}, {
atom_op_delay, ATOM_UNIT_MICROSEC}, {
atom_op_calltable, 0}, {
atom_op_repeat, 0}, {
atom_op_clear, ATOM_ARG_REG}, {
atom_op_clear, ATOM_ARG_PS}, {
atom_op_clear, ATOM_ARG_WS}, {
atom_op_clear, ATOM_ARG_FB}, {
atom_op_clear, ATOM_ARG_PLL}, {
atom_op_clear, ATOM_ARG_MC}, {
atom_op_nop, 0}, {
atom_op_eot, 0}, {
atom_op_mask, ATOM_ARG_REG}, {
atom_op_mask, ATOM_ARG_PS}, {
atom_op_mask, ATOM_ARG_WS}, {
atom_op_mask, ATOM_ARG_FB}, {
atom_op_mask, ATOM_ARG_PLL}, {
atom_op_mask, ATOM_ARG_MC}, {
atom_op_postcard, 0}, {
atom_op_beep, 0}, {
atom_op_savereg, 0}, {
atom_op_restorereg, 0}, {
atom_op_setdatablock, 0}, {
atom_op_xor, ATOM_ARG_REG}, {
atom_op_xor, ATOM_ARG_PS}, {
atom_op_xor, ATOM_ARG_WS}, {
atom_op_xor, ATOM_ARG_FB}, {
atom_op_xor, ATOM_ARG_PLL}, {
atom_op_xor, ATOM_ARG_MC}, {
atom_op_shl, ATOM_ARG_REG}, {
atom_op_shl, ATOM_ARG_PS}, {
atom_op_shl, ATOM_ARG_WS}, {
atom_op_shl, ATOM_ARG_FB}, {
atom_op_shl, ATOM_ARG_PLL}, {
atom_op_shl, ATOM_ARG_MC}, {
atom_op_shr, ATOM_ARG_REG}, {
atom_op_shr, ATOM_ARG_PS}, {
atom_op_shr, ATOM_ARG_WS}, {
atom_op_shr, ATOM_ARG_FB}, {
atom_op_shr, ATOM_ARG_PLL}, {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
unsigned char op;
atom_exec_context ectx;
int ret = 0;
if (!base)
return -EINVAL;
len = CU16(base + ATOM_CT_SIZE_PTR);
ws = CU8(base + ATOM_CT_WS_PTR);
ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
ptr = base + ATOM_CT_CODE_PTR;
SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
ectx.ctx = ctx;
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
ectx.abort = false;
ectx.last_jump = 0;
if (ws)
ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
else
ectx.ws = NULL;
debug_depth++;
while (1) {
op = CU8(ptr++);
if (op < ATOM_OP_NAMES_CNT)
SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
else
SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
if (ectx.abort) {
DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
base, len, ws, ps, ptr - 1);
ret = -EINVAL;
goto free;
}
if (op < ATOM_OP_CNT && op > 0)
opcode_table[op].func(&ectx, &ptr,
opcode_table[op].arg);
else
break;
if (op == ATOM_OP_EOT)
break;
}
debug_depth--;
SDEBUG("<<\n");
free:
if (ws)
kfree(ectx.ws);
return ret;
}
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
mutex_lock(&ctx->mutex);
/* reset data block */
ctx->data_block = 0;
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
/* reset divmul */
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
}
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
{
ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
if (!ctx->iio)
return;
while (CU8(base) == ATOM_IIO_START) {
ctx->iio[CU8(base + 1)] = base + 2;
base += 2;
while (CU8(base) != ATOM_IIO_END)
base += atom_iio_len[CU8(base)];
base += 3;
}
}
struct atom_context *atom_parse(struct card_info *card, void *bios)
{
int base;
struct atom_context *ctx =
kzalloc(sizeof(struct atom_context), GFP_KERNEL);
char *str;
char name[512];
int i;
if (!ctx)
return NULL;
ctx->card = card;
ctx->bios = bios;
if (CU16(0) != ATOM_BIOS_MAGIC) {
printk(KERN_INFO "Invalid BIOS magic.\n");
kfree(ctx);
return NULL;
}
if (strncmp
(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
strlen(ATOM_ATI_MAGIC))) {
printk(KERN_INFO "Invalid ATI magic.\n");
kfree(ctx);
return NULL;
}
base = CU16(ATOM_ROM_TABLE_PTR);
if (strncmp
(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
strlen(ATOM_ROM_MAGIC))) {
printk(KERN_INFO "Invalid ATOM magic.\n");
kfree(ctx);
return NULL;
}
ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
if (!ctx->iio) {
atom_destroy(ctx);
return NULL;
}
str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
while (*str && ((*str == '\n') || (*str == '\r')))
str++;
/* name string isn't always 0 terminated */
for (i = 0; i < 511; i++) {
name[i] = str[i];
if (name[i] < '.' || name[i] > 'z') {
name[i] = 0;
break;
}
}
printk(KERN_INFO "ATOM BIOS: %s\n", name);
return ctx;
}
int atom_asic_init(struct atom_context *ctx)
{
struct radeon_device *rdev = ctx->card->dev->dev_private;
int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
uint32_t ps[16];
int ret;
memset(ps, 0, 64);
ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
if (!ps[0] || !ps[1])
return 1;
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
if (ret)
return ret;
memset(ps, 0, 64);
if (rdev->family < CHIP_R600) {
if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
}
return ret;
}
void atom_destroy(struct atom_context *ctx)
{
kfree(ctx->iio);
kfree(ctx);
}
bool atom_parse_data_header(struct atom_context *ctx, int index,
uint16_t * size, uint8_t * frev, uint8_t * crev,
uint16_t * data_start)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->data_table + offset);
u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
if (!mdt[index])
return false;
if (size)
*size = CU16(idx);
if (frev)
*frev = CU8(idx + 2);
if (crev)
*crev = CU8(idx + 3);
*data_start = idx;
return true;
}
bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
uint8_t * crev)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->cmd_table + offset);
u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
if (!mct[index])
return false;
if (frev)
*frev = CU8(idx + 2);
if (crev)
*crev = CU8(idx + 3);
return true;
}
int atom_allocate_fb_scratch(struct atom_context *ctx)
{
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
uint16_t data_offset;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
if (!ctx->scratch)
return -ENOMEM;
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
| gpl-2.0 |
lovers-fancy/SC-05D_Kernel | drivers/uwb/whc-rc.c | 2634 | 13521 | /*
* Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3])
* Radio Control command/event transport to the UWB stack
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Initialize and hook up the Radio Control interface.
*
* For each device probed, creates an 'struct whcrc' which contains
* just the representation of the UWB Radio Controller, and the logic
* for reading notifications and passing them to the UWB Core.
*
* So we initialize all of those, register the UWB Radio Controller
* and set up the notification/event handling to pipe the notifications
* to the UWB management Daemon.
*
* Once uwb_rc_add() is called, the UWB stack takes control, resets
* the radio and readies the device to take commands from the UWB
* API/user-space.
*
* Note this driver is just a transport driver; the commands are
* formed at the UWB stack and given to this driver, which will deliver
* them to the hw and transfer the replies/notifications back to the
* UWB stack through the UWB daemon (UWBD).
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/uwb.h>
#include <linux/uwb/whci.h>
#include <linux/uwb/umc.h>
#include "uwb-internal.h"
/**
* Descriptor for an instance of the UWB Radio Control Driver that
* attaches to the URC interface of the WHCI PCI card.
*
* Unless there is a lock specific to the 'data members', all access
* is protected by uwb_rc->mutex.
*/
struct whcrc {
struct umc_dev *umc_dev;
struct uwb_rc *uwb_rc; /* UWB host controller */
unsigned long area;
void __iomem *rc_base;
size_t rc_len;
spinlock_t irq_lock;
void *evt_buf, *cmd_buf;
dma_addr_t evt_dma_buf, cmd_dma_buf;
wait_queue_head_t cmd_wq;
struct work_struct event_work;
};
/**
* Execute an UWB RC command on WHCI/RC
*
* @rc: Instance of a Radio Controller that is a whcrc
* @cmd: Buffer containing the RCCB and payload to execute
* @cmd_size: Size of the command buffer.
*
* We copy the command into whcrc->cmd_buf (as it is pretty and
* aligned and physically contiguous) and then press the right keys in
* the controller's URCCMD register to get it to read it. We might
* have to wait for the cmd_sem to be open to us.
*
* NOTE: rc's mutex has to be locked
*/
static int whcrc_cmd(struct uwb_rc *uwb_rc,
const struct uwb_rccb *cmd, size_t cmd_size)
{
int result = 0;
struct whcrc *whcrc = uwb_rc->priv;
struct device *dev = &whcrc->umc_dev->dev;
u32 urccmd;
if (cmd_size >= 4096)
return -EINVAL;
/*
* If the URC is halted, then the hardware has reset itself.
* Attempt to recover by restarting the device and then return
* an error as it's likely that the current command isn't
* valid for a newly started RC.
*/
if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
dev_err(dev, "requesting reset of halted radio controller\n");
uwb_rc_reset_all(uwb_rc);
return -EIO;
}
result = wait_event_timeout(whcrc->cmd_wq,
!(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
if (result == 0) {
dev_err(dev, "device is not ready to execute commands\n");
return -ETIMEDOUT;
}
memmove(whcrc->cmd_buf, cmd, cmd_size);
le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);
spin_lock(&whcrc->irq_lock);
urccmd = le_readl(whcrc->rc_base + URCCMD);
urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
whcrc->rc_base + URCCMD);
spin_unlock(&whcrc->irq_lock);
return 0;
}
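/*
 * Illustrative sketch (not part of the original driver): how whcrc_cmd()
 * above composes the URCCMD register value.  cmd_size must fit inside
 * URCCMD_SIZE_MASK, which is why sizes >= 4096 are rejected.  The helper
 * name is hypothetical and unused.
 */
static inline u32 whcrc_pack_urccmd_sketch(u32 urccmd, size_t cmd_size)
{
urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
return urccmd | URCCMD_ACTIVE | URCCMD_IWR | (u32)cmd_size;
}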
static int whcrc_reset(struct uwb_rc *rc)
{
struct whcrc *whcrc = rc->priv;
return umc_controller_reset(whcrc->umc_dev);
}
/**
* Reset event reception mechanism and tell hw we are ready to get more
*
* We have read all the events in the event buffer, so we are ready to
* reset it to the beginning.
*
* This is only called during initialization or after an event buffer
* has been retired. This means we can be sure that event processing
* is disabled and it's safe to update the URCEVTADDR register.
*
* There's no need to wait for the event processing to start as the
* URC will not clear URCCMD_ACTIVE until (internal) event buffer
* space is available.
*/
static
void whcrc_enable_events(struct whcrc *whcrc)
{
u32 urccmd;
le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);
spin_lock(&whcrc->irq_lock);
urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
spin_unlock(&whcrc->irq_lock);
}
static void whcrc_event_work(struct work_struct *work)
{
struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
size_t size;
u64 urcevtaddr;
urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
size = urcevtaddr & URCEVTADDR_OFFSET_MASK;
uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
whcrc_enable_events(whcrc);
}
/**
* Catch interrupts?
*
* We ack immediately (and expect the hw to do the right thing and
* raise another IRQ if things have changed :)
*/
static
irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
{
struct whcrc *whcrc = _whcrc;
struct device *dev = &whcrc->umc_dev->dev;
u32 urcsts;
urcsts = le_readl(whcrc->rc_base + URCSTS);
if (!(urcsts & URCSTS_INT_MASK))
return IRQ_NONE;
le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);
if (urcsts & URCSTS_HSE) {
dev_err(dev, "host system error -- hardware halted\n");
/* FIXME: do something sensible here */
goto out;
}
if (urcsts & URCSTS_ER)
schedule_work(&whcrc->event_work);
if (urcsts & URCSTS_RCI)
wake_up_all(&whcrc->cmd_wq);
out:
return IRQ_HANDLED;
}
/**
* Initialize a UMC RC interface: map regions, get (shared) IRQ
*/
static
int whcrc_setup_rc_umc(struct whcrc *whcrc)
{
int result = 0;
struct device *dev = &whcrc->umc_dev->dev;
struct umc_dev *umc_dev = whcrc->umc_dev;
whcrc->area = umc_dev->resource.start;
whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
result = -EBUSY;
if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) {
dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
whcrc->rc_len, whcrc->area, result);
goto error_request_region;
}
whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
if (whcrc->rc_base == NULL) {
dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
whcrc->rc_len, whcrc->area, result);
goto error_ioremap_nocache;
}
result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
KBUILD_MODNAME, whcrc);
if (result < 0) {
dev_err(dev, "can't allocate IRQ %d: %d\n",
umc_dev->irq, result);
goto error_request_irq;
}
result = -ENOMEM;
whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
&whcrc->cmd_dma_buf, GFP_KERNEL);
if (whcrc->cmd_buf == NULL) {
dev_err(dev, "Can't allocate cmd transfer buffer\n");
goto error_cmd_buffer;
}
whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
&whcrc->evt_dma_buf, GFP_KERNEL);
if (whcrc->evt_buf == NULL) {
dev_err(dev, "Can't allocate evt transfer buffer\n");
goto error_evt_buffer;
}
return 0;
error_evt_buffer:
dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
whcrc->cmd_dma_buf);
error_cmd_buffer:
free_irq(umc_dev->irq, whcrc);
error_request_irq:
iounmap(whcrc->rc_base);
error_ioremap_nocache:
release_mem_region(whcrc->area, whcrc->rc_len);
error_request_region:
return result;
}
/**
* Release RC's UMC resources
*/
static
void whcrc_release_rc_umc(struct whcrc *whcrc)
{
struct umc_dev *umc_dev = whcrc->umc_dev;
dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
whcrc->evt_dma_buf);
dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
whcrc->cmd_dma_buf);
free_irq(umc_dev->irq, whcrc);
iounmap(whcrc->rc_base);
release_mem_region(whcrc->area, whcrc->rc_len);
}
/**
* whcrc_start_rc - start a WHCI radio controller
* @rc: the radio controller to start
*
* Reset the UMC device, start the radio controller, enable events and
* finally enable interrupts.
*/
static int whcrc_start_rc(struct uwb_rc *rc)
{
struct whcrc *whcrc = rc->priv;
struct device *dev = &whcrc->umc_dev->dev;
/* Reset the thing */
le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
5000, "hardware reset") < 0)
return -EBUSY;
/* Set the event buffer, start the controller (enable IRQs later) */
le_writel(0, whcrc->rc_base + URCINTR);
le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
5000, "radio controller start") < 0)
return -ETIMEDOUT;
whcrc_enable_events(whcrc);
le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
return 0;
}
/**
* whcrc_stop_rc - stop a WHCI radio controller
* @rc: the radio controller to stop
*
* Disable interrupts and cancel any pending event processing work
* before clearing the Run/Stop bit.
*/
static
void whcrc_stop_rc(struct uwb_rc *rc)
{
struct whcrc *whcrc = rc->priv;
struct umc_dev *umc_dev = whcrc->umc_dev;
le_writel(0, whcrc->rc_base + URCINTR);
cancel_work_sync(&whcrc->event_work);
le_writel(0, whcrc->rc_base + URCCMD);
whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop");
}
static void whcrc_init(struct whcrc *whcrc)
{
spin_lock_init(&whcrc->irq_lock);
init_waitqueue_head(&whcrc->cmd_wq);
INIT_WORK(&whcrc->event_work, whcrc_event_work);
}
/**
* Initialize the radio controller.
*
* NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the
* IRQ handler we use that to determine if the hw is ready to
* handle events. Looks like a race condition, but it really is
* not.
*/
static
int whcrc_probe(struct umc_dev *umc_dev)
{
int result;
struct uwb_rc *uwb_rc;
struct whcrc *whcrc;
struct device *dev = &umc_dev->dev;
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
dev_err(dev, "unable to allocate RC instance\n");
goto error_rc_alloc;
}
whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
if (whcrc == NULL) {
dev_err(dev, "unable to allocate WHC-RC instance\n");
goto error_alloc;
}
whcrc_init(whcrc);
whcrc->umc_dev = umc_dev;
result = whcrc_setup_rc_umc(whcrc);
if (result < 0) {
dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
goto error_setup_rc_umc;
}
whcrc->uwb_rc = uwb_rc;
uwb_rc->owner = THIS_MODULE;
uwb_rc->cmd = whcrc_cmd;
uwb_rc->reset = whcrc_reset;
uwb_rc->start = whcrc_start_rc;
uwb_rc->stop = whcrc_stop_rc;
result = uwb_rc_add(uwb_rc, dev, whcrc);
if (result < 0)
goto error_rc_add;
umc_set_drvdata(umc_dev, whcrc);
return 0;
error_rc_add:
whcrc_release_rc_umc(whcrc);
error_setup_rc_umc:
kfree(whcrc);
error_alloc:
uwb_rc_put(uwb_rc);
error_rc_alloc:
return result;
}
/**
* Clean up the radio control resources
*
* When we up the command semaphore, everybody possibly held trying to
* execute a command should be granted entry and then they'll see the
* host is quiescing and up it (so it will chain to the next waiter).
* This should not happen (in any case), as we can only remove when
* there are no handles open...
*/
static void whcrc_remove(struct umc_dev *umc_dev)
{
struct whcrc *whcrc = umc_get_drvdata(umc_dev);
struct uwb_rc *uwb_rc = whcrc->uwb_rc;
umc_set_drvdata(umc_dev, NULL);
uwb_rc_rm(uwb_rc);
whcrc_release_rc_umc(whcrc);
kfree(whcrc);
uwb_rc_put(uwb_rc);
}
static int whcrc_pre_reset(struct umc_dev *umc)
{
struct whcrc *whcrc = umc_get_drvdata(umc);
struct uwb_rc *uwb_rc = whcrc->uwb_rc;
uwb_rc_pre_reset(uwb_rc);
return 0;
}
static int whcrc_post_reset(struct umc_dev *umc)
{
struct whcrc *whcrc = umc_get_drvdata(umc);
struct uwb_rc *uwb_rc = whcrc->uwb_rc;
return uwb_rc_post_reset(uwb_rc);
}
/* PCI device ID's that we handle [so it gets loaded] */
static struct pci_device_id __used whcrc_id_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
{ /* empty last entry */ }
};
MODULE_DEVICE_TABLE(pci, whcrc_id_table);
static struct umc_driver whcrc_driver = {
.name = "whc-rc",
.cap_id = UMC_CAP_ID_WHCI_RC,
.probe = whcrc_probe,
.remove = whcrc_remove,
.pre_reset = whcrc_pre_reset,
.post_reset = whcrc_post_reset,
};
static int __init whcrc_driver_init(void)
{
return umc_driver_register(&whcrc_driver);
}
module_init(whcrc_driver_init);
static void __exit whcrc_driver_exit(void)
{
umc_driver_unregister(&whcrc_driver);
}
module_exit(whcrc_driver_exit);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/motorola-kernel-stingray | arch/parisc/hpux/sys_hpux.c | 2890 | 27188 | /*
* Implements HPUX syscalls.
*
* Copyright (C) 1999 Matthew Wilcox <willy with parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 John Marvin <jsm with parisc-linux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2001 Nathan Neulinger <nneul at umr.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
#include <asm/errno.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
unsigned long hpux_brk(unsigned long addr)
{
/* Sigh. Looks like HP/UX libc relies on kernel bugs. */
return sys_brk(addr + PAGE_SIZE);
}
int hpux_sbrk(void)
{
return -ENOSYS;
}
/* Random other syscalls */
int hpux_nice(int priority_change)
{
return -ENOSYS;
}
int hpux_ptrace(void)
{
return -ENOSYS;
}
int hpux_wait(int __user *stat_loc)
{
return sys_waitpid(-1, stat_loc, 0);
}
int hpux_setpgrp(void)
{
return sys_setpgid(0,0);
}
int hpux_setpgrp3(void)
{
return hpux_setpgrp();
}
#define _SC_CPU_VERSION 10001
#define _SC_OPEN_MAX 4
#define CPU_PA_RISC1_1 0x210
int hpux_sysconf(int which)
{
switch (which) {
case _SC_CPU_VERSION:
return CPU_PA_RISC1_1;
case _SC_OPEN_MAX:
return INT_MAX;
default:
return -EINVAL;
}
}
/*****************************************************************************/
#define HPUX_UTSLEN 9
#define HPUX_SNLEN 15
struct hpux_utsname {
char sysname[HPUX_UTSLEN];
char nodename[HPUX_UTSLEN];
char release[HPUX_UTSLEN];
char version[HPUX_UTSLEN];
char machine[HPUX_UTSLEN];
char idnumber[HPUX_SNLEN];
} ;
struct hpux_ustat {
int32_t f_tfree; /* total free (daddr_t) */
u_int32_t f_tinode; /* total inodes free (ino_t) */
char f_fname[6]; /* filsys name */
char f_fpack[6]; /* filsys pack name */
u_int32_t f_blksize; /* filsys block size (int) */
};
/*
* HPUX's utssys() call. It's a collection of miscellaneous functions,
* alas, so there's no nice way of splitting them up.
*/
/* This function is called from hpux_utssys(); HP-UX implements
* ustat() as an option to utssys().
*
* Now, struct ustat on HP-UX is exactly the same as on Linux, except
* that it contains one additional field on the end, int32_t f_blksize.
* So, we could have written this function to just call the Linux
* sys_ustat(), (defined in linux/fs/super.c), and then just
* added this additional field to the user's structure. But I figure
* if we're gonna be digging through filesystem structures to get
* this, we might as well just do the whole enchilada all in one go.
*
* So, most of this function is almost identical to sys_ustat().
* I have placed comments at the few lines changed or added, to
* aid in porting forward if and when sys_ustat() is changed from
* its form in kernel 2.2.5.
*/
static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf)
{
struct super_block *s;
struct hpux_ustat tmp; /* Changed to hpux_ustat */
struct kstatfs sbuf;
int err = -EINVAL;
s = user_get_super(dev);
if (s == NULL)
goto out;
err = statfs_by_dentry(s->s_root, &sbuf);
drop_super(s);
if (err)
goto out;
memset(&tmp,0,sizeof(tmp));
tmp.f_tfree = (int32_t)sbuf.f_bfree;
tmp.f_tinode = (u_int32_t)sbuf.f_ffree;
tmp.f_blksize = (u_int32_t)sbuf.f_bsize; /* Added this line */
err = copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
out:
return err;
}
/*
* Wrapper for hpux statfs call. At the moment, just calls the linux native one
* and ignores the extra fields at the end of the hpux statfs struct.
*
*/
typedef int32_t hpux_fsid_t[2]; /* file system ID type */
typedef uint16_t hpux_site_t;
struct hpux_statfs {
int32_t f_type; /* type of info, zero for now */
int32_t f_bsize; /* fundamental file system block size */
int32_t f_blocks; /* total blocks in file system */
int32_t f_bfree; /* free block in fs */
int32_t f_bavail; /* free blocks avail to non-superuser */
int32_t f_files; /* total file nodes in file system */
int32_t f_ffree; /* free file nodes in fs */
hpux_fsid_t f_fsid; /* file system ID */
int32_t f_magic; /* file system magic number */
int32_t f_featurebits; /* file system features */
int32_t f_spare[4]; /* spare for later */
hpux_site_t f_cnode; /* cluster node where mounted */
int16_t f_pad;
};
static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p)
{
struct hpux_statfs buf;
memset(&buf, 0, sizeof(buf));
buf.f_type = st->f_type;
buf.f_bsize = st->f_bsize;
buf.f_blocks = st->f_blocks;
buf.f_bfree = st->f_bfree;
buf.f_bavail = st->f_bavail;
buf.f_files = st->f_files;
buf.f_ffree = st->f_ffree;
buf.f_fsid[0] = st->f_fsid.val[0];
buf.f_fsid[1] = st->f_fsid.val[1];
if (copy_to_user(p, &buf, sizeof(buf)))
return -EFAULT;
return 0;
}
/* hpux statfs */
asmlinkage long hpux_statfs(const char __user *pathname,
struct hpux_statfs __user *buf)
{
struct kstatfs st;
int error = user_statfs(pathname, &st);
if (!error)
error = do_statfs_hpux(&st, buf);
return error;
}
asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
{
struct kstatfs st;
int error = fd_statfs(fd, &st);
if (!error)
error = do_statfs_hpux(&st, buf);
return error;
}
/* This function is called from hpux_utssys(); HP-UX implements
* uname() as an option to utssys().
*
* The form of this function is pretty much copied from sys_olduname(),
* defined in linux/arch/i386/kernel/sys_i386.c.
*/
/* TODO: Are these put_user calls OK? Should they pass an int?
* (I copied it from sys_i386.c like this.)
*/
static int hpux_uname(struct hpux_utsname __user *name)
{
int error;
if (!name)
return -EFAULT;
if (!access_ok(VERIFY_WRITE,name,sizeof(struct hpux_utsname)))
return -EFAULT;
down_read(&uts_sem);
error = __copy_to_user(&name->sysname, &utsname()->sysname,
HPUX_UTSLEN - 1);
error |= __put_user(0, name->sysname + HPUX_UTSLEN - 1);
error |= __copy_to_user(&name->nodename, &utsname()->nodename,
HPUX_UTSLEN - 1);
error |= __put_user(0, name->nodename + HPUX_UTSLEN - 1);
error |= __copy_to_user(&name->release, &utsname()->release,
HPUX_UTSLEN - 1);
error |= __put_user(0, name->release + HPUX_UTSLEN - 1);
error |= __copy_to_user(&name->version, &utsname()->version,
HPUX_UTSLEN - 1);
error |= __put_user(0, name->version + HPUX_UTSLEN - 1);
error |= __copy_to_user(&name->machine, &utsname()->machine,
HPUX_UTSLEN - 1);
error |= __put_user(0, name->machine + HPUX_UTSLEN - 1);
up_read(&uts_sem);
/* HP-UX utsname has no domainname field. */
/* TODO: Implement idnumber!!! */
#if 0
error |= __put_user(0,name->idnumber);
error |= __put_user(0,name->idnumber+HPUX_SNLEN-1);
#endif
error = error ? -EFAULT : 0;
return error;
}
/* Note: HP-UX just uses the old suser() function to check perms
* in this system call. We'll use capable(CAP_SYS_ADMIN).
*/
int hpux_utssys(char __user *ubuf, int n, int type)
{
int len;
int error;
switch( type ) {
case 0:
/* uname(): */
return hpux_uname((struct hpux_utsname __user *)ubuf);
break ;
case 1:
/* Obsolete (used to be umask().) */
return -EFAULT ;
break ;
case 2:
/* ustat(): */
return hpux_ustat(new_decode_dev(n),
(struct hpux_ustat __user *)ubuf);
break;
case 3:
/* setuname():
*
* On linux (unlike HP-UX), utsname.nodename
* is the same as the hostname.
*
* sys_sethostname() is defined in linux/kernel/sys.c.
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Unlike Linux, HP-UX returns an error if n==0: */
if ( n <= 0 )
return -EINVAL ;
/* Unlike Linux, HP-UX truncates it if n is too big: */
len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
return sys_sethostname(ubuf, len);
break ;
case 4:
/* sethostname():
*
* sys_sethostname() is defined in linux/kernel/sys.c.
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Unlike Linux, HP-UX returns an error if n==0: */
if ( n <= 0 )
return -EINVAL ;
/* Unlike Linux, HP-UX truncates it if n is too big: */
len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
return sys_sethostname(ubuf, len);
break ;
case 5:
/* gethostname():
*
* sys_gethostname() is defined in linux/kernel/sys.c.
*/
/* Unlike Linux, HP-UX returns an error if n==0: */
if ( n <= 0 )
return -EINVAL ;
return sys_gethostname(ubuf, n);
break ;
case 6:
/* Supposedly called from setuname() in libc.
* TODO: When and why is this called?
* Is it ever even called?
*
* This code should look a lot like sys_sethostname(),
* defined in linux/kernel/sys.c. If that gets updated,
* update this code similarly.
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Unlike Linux, HP-UX returns an error if n==0: */
if ( n <= 0 )
return -EINVAL ;
/* Unlike Linux, HP-UX truncates it if n is too big: */
len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
/**/
/* TODO: print a warning about using this? */
down_write(&uts_sem);
error = -EFAULT;
if (!copy_from_user(utsname()->sysname, ubuf, len)) {
utsname()->sysname[len] = 0;
error = 0;
}
up_write(&uts_sem);
return error;
break ;
case 7:
/* Sets utsname.release, if you're allowed.
* Undocumented. Used by swinstall to change the
* OS version, during OS updates. Yuck!!!
*
* This code should look a lot like sys_sethostname()
* in linux/kernel/sys.c. If that gets updated, update
* this code similarly.
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Unlike Linux, HP-UX returns an error if n==0: */
if ( n <= 0 )
return -EINVAL ;
/* Unlike Linux, HP-UX truncates it if n is too big: */
len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
/**/
/* TODO: print a warning about this? */
down_write(&uts_sem);
error = -EFAULT;
if (!copy_from_user(utsname()->release, ubuf, len)) {
utsname()->release[len] = 0;
error = 0;
}
up_write(&uts_sem);
return error;
break ;
default:
/* This system call returns -EFAULT if given an unknown type.
* Why not -EINVAL? I don't know, it's just not what they did.
*/
return -EFAULT ;
}
}
int hpux_getdomainname(char __user *name, int len)
{
int nlen;
int err = -EFAULT;
down_read(&uts_sem);
nlen = strlen(utsname()->domainname) + 1;
if (nlen < len)
len = nlen;
if(len > __NEW_UTS_LEN)
goto done;
if(copy_to_user(name, utsname()->domainname, len))
goto done;
err = 0;
done:
up_read(&uts_sem);
return err;
}
int hpux_pipe(int *kstack_fildes)
{
return do_pipe_flags(kstack_fildes, 0);
}
/* lies - says it works, but it really didn't lock anything */
int hpux_lockf(int fildes, int function, off_t size)
{
return 0;
}
int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
{
char *fsname = NULL;
int len = 0;
int fstype;
/*Unimplemented HP-UX syscall emulation. Syscall #334 (sysfs)
Args: 1 80057bf4 0 400179f0 0 0 0 */
printk(KERN_DEBUG "in hpux_sysfs\n");
printk(KERN_DEBUG "hpux_sysfs called with opcode = %d\n", opcode);
printk(KERN_DEBUG "hpux_sysfs called with arg1='%lx'\n", arg1);
if ( opcode == 1 ) { /* GETFSIND */
char __user *user_fsname = (char __user *)arg1;
len = strlen_user(user_fsname);
printk(KERN_DEBUG "len of arg1 = %d\n", len);
if (len == 0)
return 0;
fsname = kmalloc(len, GFP_KERNEL);
if (!fsname) {
printk(KERN_DEBUG "failed to kmalloc fsname\n");
return 0;
}
if (copy_from_user(fsname, user_fsname, len)) {
printk(KERN_DEBUG "failed to copy_from_user fsname\n");
kfree(fsname);
return 0;
}
/* String could be altered by userspace after strlen_user() */
fsname[len] = '\0';
printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname);
if ( !strcmp(fsname, "hfs") ) {
fstype = 0;
} else {
fstype = 0;
}
kfree(fsname);
printk(KERN_DEBUG "returning fstype=%d\n", fstype);
return fstype; /* something other than default */
}
return 0;
}
/* Table of syscall names and handle for unimplemented routines */
static const char * const syscall_names[] = {
"nosys", /* 0 */
"exit",
"fork",
"read",
"write",
"open", /* 5 */
"close",
"wait",
"creat",
"link",
"unlink", /* 10 */
"execv",
"chdir",
"time",
"mknod",
"chmod", /* 15 */
"chown",
"brk",
"lchmod",
"lseek",
"getpid", /* 20 */
"mount",
"umount",
"setuid",
"getuid",
"stime", /* 25 */
"ptrace",
"alarm",
NULL,
"pause",
"utime", /* 30 */
"stty",
"gtty",
"access",
"nice",
"ftime", /* 35 */
"sync",
"kill",
"stat",
"setpgrp3",
"lstat", /* 40 */
"dup",
"pipe",
"times",
"profil",
"ki_call", /* 45 */
"setgid",
"getgid",
NULL,
NULL,
NULL, /* 50 */
"acct",
"set_userthreadid",
NULL,
"ioctl",
"reboot", /* 55 */
"symlink",
"utssys",
"readlink",
"execve",
"umask", /* 60 */
"chroot",
"fcntl",
"ulimit",
NULL,
NULL, /* 65 */
"vfork",
NULL,
NULL,
NULL,
NULL, /* 70 */
"mmap",
NULL,
"munmap",
"mprotect",
"madvise", /* 75 */
"vhangup",
"swapoff",
NULL,
"getgroups",
"setgroups", /* 80 */
"getpgrp2",
"setpgid/setpgrp2",
"setitimer",
"wait3",
"swapon", /* 85 */
"getitimer",
NULL,
NULL,
NULL,
"dup2", /* 90 */
NULL,
"fstat",
"select",
NULL,
"fsync", /* 95 */
"setpriority",
NULL,
NULL,
NULL,
"getpriority", /* 100 */
NULL,
NULL,
NULL,
NULL,
NULL, /* 105 */
NULL,
NULL,
"sigvector",
"sigblock",
"sigsetmask", /* 110 */
"sigpause",
"sigstack",
NULL,
NULL,
NULL, /* 115 */
"gettimeofday",
"getrusage",
NULL,
NULL,
"readv", /* 120 */
"writev",
"settimeofday",
"fchown",
"fchmod",
NULL, /* 125 */
"setresuid",
"setresgid",
"rename",
"truncate",
"ftruncate", /* 130 */
NULL,
"sysconf",
NULL,
NULL,
NULL, /* 135 */
"mkdir",
"rmdir",
NULL,
"sigcleanup",
"setcore", /* 140 */
NULL,
"gethostid",
"sethostid",
"getrlimit",
"setrlimit", /* 145 */
NULL,
NULL,
"quotactl",
"get_sysinfo",
NULL, /* 150 */
"privgrp",
"rtprio",
"plock",
NULL,
"lockf", /* 155 */
"semget",
NULL,
"semop",
"msgget",
NULL, /* 160 */
"msgsnd",
"msgrcv",
"shmget",
NULL,
"shmat", /* 165 */
"shmdt",
NULL,
"csp/nsp_init",
"cluster",
"mkrnod", /* 170 */
"test",
"unsp_open",
NULL,
"getcontext",
"osetcontext", /* 175 */
"bigio",
"pipenode",
"lsync",
"getmachineid",
"cnodeid/mysite", /* 180 */
"cnodes/sitels",
"swapclients",
"rmtprocess",
"dskless_stats",
"sigprocmask", /* 185 */
"sigpending",
"sigsuspend",
"sigaction",
NULL,
"nfssvc", /* 190 */
"getfh",
"getdomainname",
"setdomainname",
"async_daemon",
"getdirentries", /* 195 */
NULL,
NULL,
"vfsmount",
NULL,
"waitpid", /* 200 */
NULL,
NULL,
NULL,
NULL,
NULL, /* 205 */
NULL,
NULL,
NULL,
NULL,
NULL, /* 210 */
NULL,
NULL,
NULL,
NULL,
NULL, /* 215 */
NULL,
NULL,
NULL,
NULL,
NULL, /* 220 */
NULL,
NULL,
NULL,
"sigsetreturn",
"sigsetstatemask", /* 225 */
"bfactl",
"cs",
"cds",
NULL,
"pathconf", /* 230 */
"fpathconf",
NULL,
NULL,
"nfs_fcntl",
"ogetacl", /* 235 */
"ofgetacl",
"osetacl",
"ofsetacl",
"pstat",
"getaudid", /* 240 */
"setaudid",
"getaudproc",
"setaudproc",
"getevent",
"setevent", /* 245 */
"audwrite",
"audswitch",
"audctl",
"ogetaccess",
"fsctl", /* 250 */
"ulconnect",
"ulcontrol",
"ulcreate",
"uldest",
"ulrecv", /* 255 */
"ulrecvcn",
"ulsend",
"ulshutdown",
"swapfs",
"fss", /* 260 */
NULL,
NULL,
NULL,
NULL,
NULL, /* 265 */
NULL,
"tsync",
"getnumfds",
"poll",
"getmsg", /* 270 */
"putmsg",
"fchdir",
"getmount_cnt",
"getmount_entry",
"accept", /* 275 */
"bind",
"connect",
"getpeername",
"getsockname",
"getsockopt", /* 280 */
"listen",
"recv",
"recvfrom",
"recvmsg",
"send", /* 285 */
"sendmsg",
"sendto",
"setsockopt",
"shutdown",
"socket", /* 290 */
"socketpair",
"proc_open",
"proc_close",
"proc_send",
"proc_recv", /* 295 */
"proc_sendrecv",
"proc_syscall",
"ipccreate",
"ipcname",
"ipcnamerase", /* 300 */
"ipclookup",
"ipcselect",
"ipcconnect",
"ipcrecvcn",
"ipcsend", /* 305 */
"ipcrecv",
"ipcgetnodename",
"ipcsetnodename",
"ipccontrol",
"ipcshutdown", /* 310 */
"ipcdest",
"semctl",
"msgctl",
"shmctl",
"mpctl", /* 315 */
"exportfs",
"getpmsg",
"putpmsg",
"strioctl",
"msync", /* 320 */
"msleep",
"mwakeup",
"msem_init",
"msem_remove",
"adjtime", /* 325 */
"kload",
"fattach",
"fdetach",
"serialize",
"statvfs", /* 330 */
"fstatvfs",
"lchown",
"getsid",
"sysfs",
NULL, /* 335 */
NULL,
"sched_setparam",
"sched_getparam",
"sched_setscheduler",
"sched_getscheduler", /* 340 */
"sched_yield",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_rr_get_interval",
"clock_settime", /* 345 */
"clock_gettime",
"clock_getres",
"timer_create",
"timer_delete",
"timer_settime", /* 350 */
"timer_gettime",
"timer_getoverrun",
"nanosleep",
"toolbox",
NULL, /* 355 */
"getdents",
"getcontext",
"sysinfo",
"fcntl64",
"ftruncate64", /* 360 */
"fstat64",
"getdirentries64",
"getrlimit64",
"lockf64",
"lseek64", /* 365 */
"lstat64",
"mmap64",
"setrlimit64",
"stat64",
"truncate64", /* 370 */
"ulimit64",
NULL,
NULL,
NULL,
NULL, /* 375 */
NULL,
NULL,
NULL,
NULL,
"setcontext", /* 380 */
"sigaltstack",
"waitid",
"setpgrp",
"recvmsg2",
"sendmsg2", /* 385 */
"socket2",
"socketpair2",
"setregid",
"lwp_create",
"lwp_terminate", /* 390 */
"lwp_wait",
"lwp_suspend",
"lwp_resume",
"lwp_self",
"lwp_abort_syscall", /* 395 */
"lwp_info",
"lwp_kill",
"ksleep",
"kwakeup",
"ksleep_abort", /* 400 */
"lwp_proc_info",
"lwp_exit",
"lwp_continue",
"getacl",
"fgetacl", /* 405 */
"setacl",
"fsetacl",
"getaccess",
"lwp_mutex_init",
"lwp_mutex_lock_sys", /* 410 */
"lwp_mutex_unlock",
"lwp_cond_init",
"lwp_cond_signal",
"lwp_cond_broadcast",
"lwp_cond_wait_sys", /* 415 */
"lwp_getscheduler",
"lwp_setscheduler",
"lwp_getprivate",
"lwp_setprivate",
"lwp_detach", /* 420 */
"mlock",
"munlock",
"mlockall",
"munlockall",
"shm_open", /* 425 */
"shm_unlink",
"sigqueue",
"sigwaitinfo",
"sigtimedwait",
"sigwait", /* 430 */
"aio_read",
"aio_write",
"lio_listio",
"aio_error",
"aio_return", /* 435 */
"aio_cancel",
"aio_suspend",
"aio_fsync",
"mq_open",
"mq_unlink", /* 440 */
"mq_send",
"mq_receive",
"mq_notify",
"mq_setattr",
"mq_getattr", /* 445 */
"ksem_open",
"ksem_unlink",
"ksem_close",
"ksem_destroy",
"lw_sem_incr", /* 450 */
"lw_sem_decr",
"lw_sem_read",
"mq_close",
};
static const int syscall_names_max = 453;
int
hpux_unimplemented(unsigned long arg1,unsigned long arg2,unsigned long arg3,
unsigned long arg4,unsigned long arg5,unsigned long arg6,
unsigned long arg7,unsigned long sc_num)
{
/* NOTE: sc_num trashes arg8 for the few syscalls that actually
* have a valid 8th argument.
*/
const char *name = NULL;
if ( sc_num <= syscall_names_max && sc_num >= 0 ) {
name = syscall_names[sc_num];
}
if ( name ) {
printk(KERN_DEBUG "Unimplemented HP-UX syscall emulation. Syscall #%lu (%s)\n",
sc_num, name);
} else {
printk(KERN_DEBUG "Unimplemented unknown HP-UX syscall emulation. Syscall #%lu\n",
sc_num);
}
printk(KERN_DEBUG " Args: %lx %lx %lx %lx %lx %lx %lx\n",
arg1, arg2, arg3, arg4, arg5, arg6, arg7);
return -ENOSYS;
}
| gpl-2.0 |
TheBootloader/android_kernel_shooter | arch/powerpc/platforms/85xx/stx_gp3.c | 2890 | 4080 | /*
* Based on MPC8560 ADS and arch/ppc stx_gp3 ports
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* Copyright 2008 Freescale Semiconductor Inc.
*
* Dan Malek <dan@embeddededge.com>
* Copyright 2004 Embedded Edge, LLC
*
* Copied from mpc8560_ads.c
* Copyright 2002, 2003 Motorola Inc.
*
* Ported to 2.6, Matt Porter <mporter@kernel.crashing.org>
* Copyright 2004-2005 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#include <sysdev/cpm2_pic.h>
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_CPM2 */
static void __init stx_gp3_pic_init(void)
{
struct mpic *mpic;
struct resource r;
struct device_node *np;
#ifdef CONFIG_CPM2
int irq;
#endif
np = of_find_node_by_type(NULL, "open-pic");
if (!np) {
printk(KERN_ERR "Could not find open-pic node\n");
return;
}
if (of_address_to_resource(np, 0, &r)) {
printk(KERN_ERR "Could not map mpic register space\n");
of_node_put(np);
return;
}
mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
of_node_put(np);
mpic_init(mpic);
#ifdef CONFIG_CPM2
/* Setup CPM2 PIC */
np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
if (np == NULL) {
printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
return;
}
irq = irq_of_parse_and_map(np, 0);
if (irq == NO_IRQ) {
of_node_put(np);
printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
return;
}
cpm2_pic_init(np);
of_node_put(np);
irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
/*
* Setup the architecture
*/
static void __init stx_gp3_setup_arch(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
#endif
if (ppc_md.progress)
ppc_md.progress("stx_gp3_setup_arch()", 0);
#ifdef CONFIG_CPM2
cpm2_reset();
#endif
#ifdef CONFIG_PCI
for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
fsl_add_bridge(np, 1);
#endif
}
static void stx_gp3_show_cpuinfo(struct seq_file *m)
{
uint pvid, svid, phid1;
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: RPC Electronics STx\n");
seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
static struct of_device_id __initdata of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{},
};
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(stx_gp3, declare_of_platform_devices);
/*
* Called very early, device-tree isn't unflattened
*/
static int __init stx_gp3_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "stx,gp3-8560");
}
define_machine(stx_gp3) {
.name = "STX GP3",
.probe = stx_gp3_probe,
.setup_arch = stx_gp3_setup_arch,
.init_IRQ = stx_gp3_pic_init,
.show_cpuinfo = stx_gp3_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
| gpl-2.0 |
gearslam/v20j-geeb | fs/nilfs2/ioctl.c | 2890 | 21606 | /*
* ioctl.c - NILFS ioctl operations.
*
* Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Written by Koji Sato <koji@osrg.net>.
*/
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/capability.h> /* capable() */
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
#include <linux/compat.h> /* compat_ptr() */
#include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */
#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
#include "bmap.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
struct nilfs_argv *argv, int dir,
ssize_t (*dofunc)(struct the_nilfs *,
__u64 *, int,
void *, size_t, size_t))
{
void *buf;
void __user *base = (void __user *)(unsigned long)argv->v_base;
size_t maxmembs, total, n;
ssize_t nr;
int ret, i;
__u64 pos, ppos;
if (argv->v_nmembs == 0)
return 0;
if (argv->v_size > PAGE_SIZE)
return -EINVAL;
buf = (void *)__get_free_pages(GFP_NOFS, 0);
if (unlikely(!buf))
return -ENOMEM;
maxmembs = PAGE_SIZE / argv->v_size;
ret = 0;
total = 0;
pos = argv->v_index;
for (i = 0; i < argv->v_nmembs; i += n) {
n = (argv->v_nmembs - i < maxmembs) ?
argv->v_nmembs - i : maxmembs;
if ((dir & _IOC_WRITE) &&
copy_from_user(buf, base + argv->v_size * i,
argv->v_size * n)) {
ret = -EFAULT;
break;
}
ppos = pos;
nr = dofunc(nilfs, &pos, argv->v_flags, buf, argv->v_size,
n);
if (nr < 0) {
ret = nr;
break;
}
if ((dir & _IOC_READ) &&
copy_to_user(base + argv->v_size * i, buf,
argv->v_size * nr)) {
ret = -EFAULT;
break;
}
total += nr;
if ((size_t)nr < n)
break;
if (pos == ppos)
pos += n;
}
argv->v_nmembs = total;
free_pages((unsigned long)buf, 0);
return ret;
}
static int nilfs_ioctl_getflags(struct inode *inode, void __user *argp)
{
unsigned int flags = NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE;
return put_user(flags, (int __user *)argp);
}
static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
void __user *argp)
{
struct nilfs_transaction_info ti;
unsigned int flags, oldflags;
int ret;
if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *)argp))
return -EFAULT;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
flags = nilfs_mask_flags(inode->i_mode, flags);
mutex_lock(&inode->i_mutex);
oldflags = NILFS_I(inode)->i_flags;
/*
* The IMMUTABLE and APPEND_ONLY flags can only be changed by the
* relevant capability.
*/
ret = -EPERM;
if (((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
!capable(CAP_LINUX_IMMUTABLE))
goto out;
ret = nilfs_transaction_begin(inode->i_sb, &ti, 0);
if (ret)
goto out;
NILFS_I(inode)->i_flags = (oldflags & ~FS_FL_USER_MODIFIABLE) |
(flags & FS_FL_USER_MODIFIABLE);
nilfs_set_inode_flags(inode);
inode->i_ctime = CURRENT_TIME;
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
nilfs_mark_inode_dirty(inode);
ret = nilfs_transaction_commit(inode->i_sb);
out:
mutex_unlock(&inode->i_mutex);
mnt_drop_write_file(filp);
return ret;
}
static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
{
return put_user(inode->i_generation, (int __user *)argp);
}
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_transaction_info ti;
struct nilfs_cpmode cpmode;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
ret = -EFAULT;
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
goto out;
down_read(&inode->i_sb->s_umount);
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
nilfs->ns_cpfile, cpmode.cm_cno, cpmode.cm_mode);
if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
up_read(&inode->i_sb->s_umount);
out:
mnt_drop_write_file(filp);
return ret;
}
static int
nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_transaction_info ti;
__u64 cno;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
ret = -EFAULT;
if (copy_from_user(&cno, argp, sizeof(cno)))
goto out;
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_delete_checkpoint(nilfs->ns_cpfile, cno);
if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
out:
mnt_drop_write_file(filp);
return ret;
}
static ssize_t
nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
void *buf, size_t size, size_t nmembs)
{
int ret;
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf,
size, nmembs);
up_read(&nilfs->ns_segctor_sem);
return ret;
}
static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_cpstat cpstat;
int ret;
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
up_read(&nilfs->ns_segctor_sem);
if (ret < 0)
return ret;
if (copy_to_user(argp, &cpstat, sizeof(cpstat)))
ret = -EFAULT;
return ret;
}
static ssize_t
nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
void *buf, size_t size, size_t nmembs)
{
int ret;
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, size,
nmembs);
up_read(&nilfs->ns_segctor_sem);
return ret;
}
static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_sustat sustat;
int ret;
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat);
up_read(&nilfs->ns_segctor_sem);
if (ret < 0)
return ret;
if (copy_to_user(argp, &sustat, sizeof(sustat)))
ret = -EFAULT;
return ret;
}
static ssize_t
nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
void *buf, size_t size, size_t nmembs)
{
int ret;
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_dat_get_vinfo(nilfs->ns_dat, buf, size, nmembs);
up_read(&nilfs->ns_segctor_sem);
return ret;
}
static ssize_t
nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
void *buf, size_t size, size_t nmembs)
{
struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap;
struct nilfs_bdesc *bdescs = buf;
int ret, i;
down_read(&nilfs->ns_segctor_sem);
for (i = 0; i < nmembs; i++) {
ret = nilfs_bmap_lookup_at_level(bmap,
bdescs[i].bd_offset,
bdescs[i].bd_level + 1,
&bdescs[i].bd_blocknr);
if (ret < 0) {
if (ret != -ENOENT) {
up_read(&nilfs->ns_segctor_sem);
return ret;
}
bdescs[i].bd_blocknr = 0;
}
}
up_read(&nilfs->ns_segctor_sem);
return nmembs;
}
static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_argv argv;
int ret;
if (copy_from_user(&argv, argp, sizeof(argv)))
return -EFAULT;
if (argv.v_size != sizeof(struct nilfs_bdesc))
return -EINVAL;
ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd),
nilfs_ioctl_do_get_bdescs);
if (ret < 0)
return ret;
if (copy_to_user(argp, &argv, sizeof(argv)))
ret = -EFAULT;
return ret;
}
static int nilfs_ioctl_move_inode_block(struct inode *inode,
struct nilfs_vdesc *vdesc,
struct list_head *buffers)
{
struct buffer_head *bh;
int ret;
if (vdesc->vd_flags == 0)
ret = nilfs_gccache_submit_read_data(
inode, vdesc->vd_offset, vdesc->vd_blocknr,
vdesc->vd_vblocknr, &bh);
else
ret = nilfs_gccache_submit_read_node(
inode, vdesc->vd_blocknr, vdesc->vd_vblocknr, &bh);
if (unlikely(ret < 0)) {
if (ret == -ENOENT)
printk(KERN_CRIT
"%s: invalid virtual block address (%s): "
"ino=%llu, cno=%llu, offset=%llu, "
"blocknr=%llu, vblocknr=%llu\n",
__func__, vdesc->vd_flags ? "node" : "data",
(unsigned long long)vdesc->vd_ino,
(unsigned long long)vdesc->vd_cno,
(unsigned long long)vdesc->vd_offset,
(unsigned long long)vdesc->vd_blocknr,
(unsigned long long)vdesc->vd_vblocknr);
return ret;
}
if (unlikely(!list_empty(&bh->b_assoc_buffers))) {
printk(KERN_CRIT "%s: conflicting %s buffer: ino=%llu, "
"cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu\n",
__func__, vdesc->vd_flags ? "node" : "data",
(unsigned long long)vdesc->vd_ino,
(unsigned long long)vdesc->vd_cno,
(unsigned long long)vdesc->vd_offset,
(unsigned long long)vdesc->vd_blocknr,
(unsigned long long)vdesc->vd_vblocknr);
brelse(bh);
return -EEXIST;
}
list_add_tail(&bh->b_assoc_buffers, buffers);
return 0;
}
static int nilfs_ioctl_move_blocks(struct super_block *sb,
struct nilfs_argv *argv, void *buf)
{
size_t nmembs = argv->v_nmembs;
struct the_nilfs *nilfs = sb->s_fs_info;
struct inode *inode;
struct nilfs_vdesc *vdesc;
struct buffer_head *bh, *n;
LIST_HEAD(buffers);
ino_t ino;
__u64 cno;
int i, ret;
for (i = 0, vdesc = buf; i < nmembs; ) {
ino = vdesc->vd_ino;
cno = vdesc->vd_cno;
inode = nilfs_iget_for_gc(sb, ino, cno);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto failed;
}
if (list_empty(&NILFS_I(inode)->i_dirty)) {
/*
* Add the inode to GC inode list. Garbage Collection
* is serialized and no two processes manipulate the
* list simultaneously.
*/
igrab(inode);
list_add(&NILFS_I(inode)->i_dirty,
&nilfs->ns_gc_inodes);
}
do {
ret = nilfs_ioctl_move_inode_block(inode, vdesc,
&buffers);
if (unlikely(ret < 0)) {
iput(inode);
goto failed;
}
vdesc++;
} while (++i < nmembs &&
vdesc->vd_ino == ino && vdesc->vd_cno == cno);
iput(inode); /* The inode still remains in GC inode list */
}
list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) {
ret = nilfs_gccache_wait_and_mark_dirty(bh);
if (unlikely(ret < 0)) {
WARN_ON(ret == -EEXIST);
goto failed;
}
list_del_init(&bh->b_assoc_buffers);
brelse(bh);
}
return nmembs;
failed:
list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) {
list_del_init(&bh->b_assoc_buffers);
brelse(bh);
}
return ret;
}
static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
{
size_t nmembs = argv->v_nmembs;
struct inode *cpfile = nilfs->ns_cpfile;
struct nilfs_period *periods = buf;
int ret, i;
for (i = 0; i < nmembs; i++) {
ret = nilfs_cpfile_delete_checkpoints(
cpfile, periods[i].p_start, periods[i].p_end);
if (ret < 0)
return ret;
}
return nmembs;
}
static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
{
size_t nmembs = argv->v_nmembs;
int ret;
ret = nilfs_dat_freev(nilfs->ns_dat, buf, nmembs);
return (ret < 0) ? ret : nmembs;
}
static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void *buf)
{
size_t nmembs = argv->v_nmembs;
struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap;
struct nilfs_bdesc *bdescs = buf;
int ret, i;
for (i = 0; i < nmembs; i++) {
/* XXX: use macro or inline func to check liveness */
ret = nilfs_bmap_lookup_at_level(bmap,
bdescs[i].bd_offset,
bdescs[i].bd_level + 1,
&bdescs[i].bd_blocknr);
if (ret < 0) {
if (ret != -ENOENT)
return ret;
bdescs[i].bd_blocknr = 0;
}
if (bdescs[i].bd_blocknr != bdescs[i].bd_oblocknr)
/* skip dead block */
continue;
if (bdescs[i].bd_level == 0) {
ret = nilfs_mdt_mark_block_dirty(nilfs->ns_dat,
bdescs[i].bd_offset);
if (ret < 0) {
WARN_ON(ret == -ENOENT);
return ret;
}
} else {
ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset,
bdescs[i].bd_level);
if (ret < 0) {
WARN_ON(ret == -ENOENT);
return ret;
}
}
}
return nmembs;
}
int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
struct nilfs_argv *argv, void **kbufs)
{
const char *msg;
int ret;
ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], kbufs[1]);
if (ret < 0) {
/*
* can safely abort because checkpoints can be removed
* independently.
*/
msg = "cannot delete checkpoints";
goto failed;
}
ret = nilfs_ioctl_free_vblocknrs(nilfs, &argv[2], kbufs[2]);
if (ret < 0) {
/*
* can safely abort because DAT file is updated atomically
* using a copy-on-write technique.
*/
msg = "cannot delete virtual blocks from DAT file";
goto failed;
}
ret = nilfs_ioctl_mark_blocks_dirty(nilfs, &argv[3], kbufs[3]);
if (ret < 0) {
/*
* can safely abort because the operation is nondestructive.
*/
msg = "cannot mark copying blocks dirty";
goto failed;
}
return 0;
failed:
printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n",
msg, ret);
return ret;
}
static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
struct nilfs_argv argv[5];
static const size_t argsz[5] = {
sizeof(struct nilfs_vdesc),
sizeof(struct nilfs_period),
sizeof(__u64),
sizeof(struct nilfs_bdesc),
sizeof(__u64),
};
void __user *base;
void *kbufs[5];
struct the_nilfs *nilfs;
size_t len, nsegs;
int n, ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
ret = -EFAULT;
if (copy_from_user(argv, argp, sizeof(argv)))
goto out;
ret = -EINVAL;
nsegs = argv[4].v_nmembs;
if (argv[4].v_size != argsz[4])
goto out;
if (nsegs > UINT_MAX / sizeof(__u64))
goto out;
/*
* argv[4] points to segment numbers this ioctl cleans. We
* use kmalloc() for its buffer because memory used for the
* segment numbers is small enough.
*/
kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base,
nsegs * sizeof(__u64));
if (IS_ERR(kbufs[4])) {
ret = PTR_ERR(kbufs[4]);
goto out;
}
nilfs = inode->i_sb->s_fs_info;
for (n = 0; n < 4; n++) {
ret = -EINVAL;
if (argv[n].v_size != argsz[n])
goto out_free;
if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
goto out_free;
if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
goto out_free;
len = argv[n].v_size * argv[n].v_nmembs;
base = (void __user *)(unsigned long)argv[n].v_base;
if (len == 0) {
kbufs[n] = NULL;
continue;
}
kbufs[n] = vmalloc(len);
if (!kbufs[n]) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(kbufs[n], base, len)) {
ret = -EFAULT;
vfree(kbufs[n]);
goto out_free;
}
}
/*
* nilfs_ioctl_move_blocks() will call nilfs_iget_for_gc(),
* which will operate on an inode list without blocking.
* To protect the list from concurrent operations,
* nilfs_ioctl_move_blocks should be an atomic operation.
*/
if (test_and_set_bit(THE_NILFS_GC_RUNNING, &nilfs->ns_flags)) {
ret = -EBUSY;
goto out_free;
}
vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
if (ret < 0)
printk(KERN_ERR "NILFS: GC failed during preparation: "
"cannot read source blocks: err=%d\n", ret);
else
ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
nilfs_remove_all_gcinodes(nilfs);
clear_nilfs_gc_running(nilfs);
out_free:
while (--n >= 0)
vfree(kbufs[n]);
kfree(kbufs[4]);
out:
mnt_drop_write_file(filp);
return ret;
}
static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
__u64 cno;
int ret;
struct the_nilfs *nilfs;
ret = nilfs_construct_segment(inode->i_sb);
if (ret < 0)
return ret;
if (argp != NULL) {
nilfs = inode->i_sb->s_fs_info;
down_read(&nilfs->ns_segctor_sem);
cno = nilfs->ns_cno - 1;
up_read(&nilfs->ns_segctor_sem);
if (copy_to_user(argp, &cno, sizeof(cno)))
return -EFAULT;
}
return 0;
}
static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
void __user *argp)
{
__u64 newsize;
int ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
ret = mnt_want_write_file(filp);
if (ret)
goto out;
ret = -EFAULT;
if (copy_from_user(&newsize, argp, sizeof(newsize)))
goto out_drop_write;
ret = nilfs_resize_fs(inode->i_sb, newsize);
out_drop_write:
mnt_drop_write_file(filp);
out:
return ret;
}
static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
{
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
__u64 range[2];
__u64 minseg, maxseg;
unsigned long segbytes;
int ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
ret = -EFAULT;
if (copy_from_user(range, argp, sizeof(__u64[2])))
goto out;
ret = -ERANGE;
if (range[1] > i_size_read(inode->i_sb->s_bdev->bd_inode))
goto out;
segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize;
minseg = range[0] + segbytes - 1;
do_div(minseg, segbytes);
maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
do_div(maxseg, segbytes);
maxseg--;
ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg);
out:
return ret;
}
static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp,
size_t membsz,
ssize_t (*dofunc)(struct the_nilfs *,
__u64 *, int,
void *, size_t, size_t))
{
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
struct nilfs_argv argv;
int ret;
if (copy_from_user(&argv, argp, sizeof(argv)))
return -EFAULT;
if (argv.v_size < membsz)
return -EINVAL;
ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), dofunc);
if (ret < 0)
return ret;
if (copy_to_user(argp, &argv, sizeof(argv)))
ret = -EFAULT;
return ret;
}
long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
void __user *argp = (void __user *)arg;
switch (cmd) {
case FS_IOC_GETFLAGS:
return nilfs_ioctl_getflags(inode, argp);
case FS_IOC_SETFLAGS:
return nilfs_ioctl_setflags(inode, filp, argp);
case FS_IOC_GETVERSION:
return nilfs_ioctl_getversion(inode, argp);
case NILFS_IOCTL_CHANGE_CPMODE:
return nilfs_ioctl_change_cpmode(inode, filp, cmd, argp);
case NILFS_IOCTL_DELETE_CHECKPOINT:
return nilfs_ioctl_delete_checkpoint(inode, filp, cmd, argp);
case NILFS_IOCTL_GET_CPINFO:
return nilfs_ioctl_get_info(inode, filp, cmd, argp,
sizeof(struct nilfs_cpinfo),
nilfs_ioctl_do_get_cpinfo);
case NILFS_IOCTL_GET_CPSTAT:
return nilfs_ioctl_get_cpstat(inode, filp, cmd, argp);
case NILFS_IOCTL_GET_SUINFO:
return nilfs_ioctl_get_info(inode, filp, cmd, argp,
sizeof(struct nilfs_suinfo),
nilfs_ioctl_do_get_suinfo);
case NILFS_IOCTL_GET_SUSTAT:
return nilfs_ioctl_get_sustat(inode, filp, cmd, argp);
case NILFS_IOCTL_GET_VINFO:
return nilfs_ioctl_get_info(inode, filp, cmd, argp,
sizeof(struct nilfs_vinfo),
nilfs_ioctl_do_get_vinfo);
case NILFS_IOCTL_GET_BDESCS:
return nilfs_ioctl_get_bdescs(inode, filp, cmd, argp);
case NILFS_IOCTL_CLEAN_SEGMENTS:
return nilfs_ioctl_clean_segments(inode, filp, cmd, argp);
case NILFS_IOCTL_SYNC:
return nilfs_ioctl_sync(inode, filp, cmd, argp);
case NILFS_IOCTL_RESIZE:
return nilfs_ioctl_resize(inode, filp, argp);
case NILFS_IOCTL_SET_ALLOC_RANGE:
return nilfs_ioctl_set_alloc_range(inode, argp);
default:
return -ENOTTY;
}
}
#ifdef CONFIG_COMPAT
long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
break;
case FS_IOC32_SETFLAGS:
cmd = FS_IOC_SETFLAGS;
break;
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
case NILFS_IOCTL_CHANGE_CPMODE:
case NILFS_IOCTL_DELETE_CHECKPOINT:
case NILFS_IOCTL_GET_CPINFO:
case NILFS_IOCTL_GET_CPSTAT:
case NILFS_IOCTL_GET_SUINFO:
case NILFS_IOCTL_GET_SUSTAT:
case NILFS_IOCTL_GET_VINFO:
case NILFS_IOCTL_GET_BDESCS:
case NILFS_IOCTL_CLEAN_SEGMENTS:
case NILFS_IOCTL_SYNC:
case NILFS_IOCTL_RESIZE:
case NILFS_IOCTL_SET_ALLOC_RANGE:
break;
default:
return -ENOIOCTLCMD;
}
return nilfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif
| gpl-2.0 |
utilite-computer/linux-kernel | fs/quota/kqid.c | 2890 | 3248 | #include <linux/fs.h>
#include <linux/quota.h>
#include <linux/export.h>
/**
* qid_eq - Test to see if two kqid values are the same
* @left: A qid value
* @right: Another qid value
*
* Return true if the two qid values are equal and false otherwise.
*/
bool qid_eq(struct kqid left, struct kqid right)
{
if (left.type != right.type)
return false;
switch(left.type) {
case USRQUOTA:
return uid_eq(left.uid, right.uid);
case GRPQUOTA:
return gid_eq(left.gid, right.gid);
case PRJQUOTA:
return projid_eq(left.projid, right.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(qid_eq);
/**
* qid_lt - Test to see if one qid value is less than another
* @left: The possibly lesser qid value
* @right: The possibly greater qid value
*
* Return true if left is less than right and false otherwise.
*/
bool qid_lt(struct kqid left, struct kqid right)
{
if (left.type < right.type)
return true;
if (left.type > right.type)
return false;
switch (left.type) {
case USRQUOTA:
return uid_lt(left.uid, right.uid);
case GRPQUOTA:
return gid_lt(left.gid, right.gid);
case PRJQUOTA:
return projid_lt(left.projid, right.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(qid_lt);
/**
* from_kqid - Create a qid from a kqid user-namespace pair.
* @targ: The user namespace we want a qid in.
* @kqid: The kernel internal quota identifier to start with.
*
* Map @kqid into the user-namespace specified by @targ and
* return the resulting qid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kqid has no mapping in @targ (qid_t)-1 is returned.
*/
qid_t from_kqid(struct user_namespace *targ, struct kqid kqid)
{
switch (kqid.type) {
case USRQUOTA:
return from_kuid(targ, kqid.uid);
case GRPQUOTA:
return from_kgid(targ, kqid.gid);
case PRJQUOTA:
return from_kprojid(targ, kqid.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(from_kqid);
/**
* from_kqid_munged - Create a qid from a kqid user-namespace pair.
* @targ: The user namespace we want a qid in.
* @kqid: The kernel internal quota identifier to start with.
*
* Map @kqid into the user-namespace specified by @targ and
* return the resulting qid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kqid from_kqid_munged never fails and always
* returns a valid qid. This makes from_kqid_munged
* appropriate for use in places where failing to provide
* a qid_t is not a good option.
*
* If @kqid has no mapping in @targ the kqid.type specific
* overflow identifier is returned.
*/
qid_t from_kqid_munged(struct user_namespace *targ, struct kqid kqid)
{
switch (kqid.type) {
case USRQUOTA:
return from_kuid_munged(targ, kqid.uid);
case GRPQUOTA:
return from_kgid_munged(targ, kqid.gid);
case PRJQUOTA:
return from_kprojid_munged(targ, kqid.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(from_kqid_munged);
/**
* qid_valid - Report if a valid value is stored in a kqid.
* @qid: The kernel internal quota identifier to test.
*/
bool qid_valid(struct kqid qid)
{
switch (qid.type) {
case USRQUOTA:
return uid_valid(qid.uid);
case GRPQUOTA:
return gid_valid(qid.gid);
case PRJQUOTA:
return projid_valid(qid.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(qid_valid);
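/*
 * Illustrative sketch (not part of the original file): a typical pattern
 * when reporting a kqid to userspace is to validate it first and then use
 * the munged conversion, which never fails.  The helper name is
 * hypothetical and unused.
 */
static inline qid_t kqid_to_user_sketch(struct user_namespace *ns, struct kqid qid)
{
if (!qid_valid(qid))
return (qid_t)-1;
return from_kqid_munged(ns, qid);
}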
| gpl-2.0 |
santod/KK_sense_kernel_htc_m8vzw | drivers/spi/spidev.c | 3402 | 19723 | /*
* Simple synchronous userspace interface to SPI devices
*
* Copyright (C) 2006 SWAPP
* Andrea Paterniani <a.paterniani@swapp-eng.it>
* Copyright (C) 2007 David Brownell (simplification, cleanup)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
#include <asm/uaccess.h>
/*
* This supports access to SPI devices using normal userspace I/O calls.
* Note that while traditional UNIX/POSIX I/O semantics are half duplex,
* and often mask message boundaries, full SPI support requires full duplex
* transfers. There are several kinds of internal message boundaries to
* handle chipselect management and other protocol options.
*
* SPI has a character major number assigned. We allocate minor numbers
* dynamically using a bitmask. You must use hotplug tools, such as udev
* (or mdev with busybox) to create and destroy the /dev/spidevB.C device
* nodes, since there is no fixed association of minor numbers with any
* particular SPI bus or device.
*/
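/*
 * Illustrative userspace sketch (added for exposition; guarded out since it
 * is not kernel code): one way a program might drive a /dev/spidevB.C node
 * through the ioctl interface implemented below.  The device path, transfer
 * contents and clock speed are assumptions.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int spidev_user_example(void)
{
	__u8 tx[4] = { 0x9f, 0, 0, 0 };		/* e.g. a flash JEDEC-ID probe */
	__u8 rx[4] = { 0 };
	struct spi_ioc_transfer xfer = {
		.tx_buf = (unsigned long)tx,
		.rx_buf = (unsigned long)rx,
		.len = sizeof(tx),
		.speed_hz = 1000000,
		.bits_per_word = 8,
	};
	__u8 mode = SPI_MODE_0;
	int fd = open("/dev/spidev1.0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0 ||
	    ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return rx[1];				/* first byte clocked in after the command */
}
#endif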
#define SPIDEV_MAJOR 153 /* assigned */
#define N_SPI_MINORS 32 /* ... up to 256 */
static DECLARE_BITMAP(minors, N_SPI_MINORS);
/* Bit masks for spi_device.mode management. Note that incorrect
* settings for some of these bits can cause *lots* of trouble for other
* devices on a shared bus:
*
* - CS_HIGH ... this device will be active when it shouldn't be
* - 3WIRE ... when active, it won't behave as it should
* - NO_CS ... there will be no explicit message boundaries; this
* is completely incompatible with the shared bus model
* - READY ... transfers may proceed when they shouldn't.
*
* REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
| SPI_NO_CS | SPI_READY)
struct spidev_data {
dev_t devt;
spinlock_t spi_lock;
struct spi_device *spi;
struct list_head device_entry;
/* buffer is NULL unless this device is open (users > 0) */
struct mutex buf_lock;
unsigned users;
u8 *buffer;
u8 *bufferrx;
};
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);
static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
/*
* This can be used for testing the controller, given the bus number and the
* chip select to use. If those parameters are used, spidev is
* dynamically added as a device on that bus, and messages can be sent
* via this interface.
*/
static int busnum = -1;
module_param(busnum, int, S_IRUGO);
MODULE_PARM_DESC(busnum, "bus num of the controller");
static int chipselect = -1;
module_param(chipselect, int, S_IRUGO);
MODULE_PARM_DESC(chipselect, "chip select of the desired device");
static int maxspeed = 10000000;
module_param(maxspeed, int, S_IRUGO);
MODULE_PARM_DESC(maxspeed, "max_speed of the desired device");
static int spimode = SPI_MODE_3;
module_param(spimode, int, S_IRUGO);
MODULE_PARM_DESC(spimode, "mode of the desired device");
static struct spi_device *spi;
/*-------------------------------------------------------------------------*/
/*
* We can't use the standard synchronous wrappers for file I/O; we
* need to protect against async removal of the underlying spi_device.
*/
static void spidev_complete(void *arg)
{
complete(arg);
}
static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
int status;
message->complete = spidev_complete;
message->context = &done;
spin_lock_irq(&spidev->spi_lock);
if (spidev->spi == NULL)
status = -ESHUTDOWN;
else
status = spi_async(spidev->spi, message);
spin_unlock_irq(&spidev->spi_lock);
if (status == 0) {
wait_for_completion(&done);
status = message->status;
if (status == 0)
status = message->actual_length;
}
return status;
}
static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
struct spi_transfer t = {
.tx_buf = spidev->buffer,
.len = len,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
return spidev_sync(spidev, &m);
}
static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
struct spi_transfer t = {
.rx_buf = spidev->buffer,
.len = len,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
return spidev_sync(spidev, &m);
}
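/*
 * Illustrative sketch only (added for exposition, not part of the original
 * driver): a full-duplex variant of the two helpers above, clocking data out
 * of the tx bounce buffer while capturing incoming bytes in the rx bounce
 * buffer within a single transfer.  The helper name is hypothetical; like
 * the helpers above it assumes the caller holds buf_lock.
 */
static inline ssize_t __maybe_unused
spidev_sync_duplex(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer	t = {
			.tx_buf		= spidev->buffer,
			.rx_buf		= spidev->bufferrx,
			.len		= len,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}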
/*-------------------------------------------------------------------------*/
/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
ssize_t status = 0;
/* chipselect only toggles at start or end of operation */
if (count > bufsiz)
return -EMSGSIZE;
spidev = filp->private_data;
mutex_lock(&spidev->buf_lock);
status = spidev_sync_read(spidev, count);
if (status > 0) {
unsigned long missing;
missing = copy_to_user(buf, spidev->buffer, status);
if (missing == status)
status = -EFAULT;
else
status = status - missing;
}
mutex_unlock(&spidev->buf_lock);
return status;
}
/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
ssize_t status = 0;
unsigned long missing;
/* chipselect only toggles at start or end of operation */
if (count > bufsiz)
return -EMSGSIZE;
spidev = filp->private_data;
mutex_lock(&spidev->buf_lock);
missing = copy_from_user(spidev->buffer, buf, count);
if (missing == 0) {
status = spidev_sync_write(spidev, count);
} else
status = -EFAULT;
mutex_unlock(&spidev->buf_lock);
return status;
}
static int spidev_message(struct spidev_data *spidev,
struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
struct spi_message msg;
struct spi_transfer *k_xfers;
struct spi_transfer *k_tmp;
struct spi_ioc_transfer *u_tmp;
unsigned n, total;
u8 *buf, *bufrx;
int status = -EFAULT;
spi_message_init(&msg);
k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
if (k_xfers == NULL)
return -ENOMEM;
/* Construct spi_message, copying any tx data to bounce buffer.
* We walk the array of user-provided transfers, using each one
* to initialize a kernel version of the same transfer.
*/
buf = spidev->buffer;
bufrx = spidev->bufferrx;
total = 0;
for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
n;
n--, k_tmp++, u_tmp++) {
k_tmp->len = u_tmp->len;
total += k_tmp->len;
if (total > bufsiz) {
status = -EMSGSIZE;
goto done;
}
if (u_tmp->rx_buf) {
k_tmp->rx_buf = bufrx;
if (!access_ok(VERIFY_WRITE, (u8 __user *)
(uintptr_t) u_tmp->rx_buf,
u_tmp->len))
goto done;
}
if (u_tmp->tx_buf) {
k_tmp->tx_buf = buf;
if (copy_from_user(buf, (const u8 __user *)
(uintptr_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
}
buf += k_tmp->len;
bufrx += k_tmp->len;
k_tmp->cs_change = !!u_tmp->cs_change;
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
" xfer len %zd %s%s%s%dbits %u usec %uHz\n",
u_tmp->len,
u_tmp->rx_buf ? "rx " : "",
u_tmp->tx_buf ? "tx " : "",
u_tmp->cs_change ? "cs " : "",
u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
u_tmp->delay_usecs,
u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
}
status = spidev_sync(spidev, &msg);
if (status < 0)
goto done;
/* copy any rx data out of bounce buffer */
buf = spidev->bufferrx;
for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
if (u_tmp->rx_buf) {
if (__copy_to_user((u8 __user *)
(uintptr_t) u_tmp->rx_buf, buf,
u_tmp->len)) {
status = -EFAULT;
goto done;
}
}
buf += u_tmp->len;
}
status = total;
done:
kfree(k_xfers);
return status;
}
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int err = 0;
int retval = 0;
struct spidev_data *spidev;
struct spi_device *spi;
u32 tmp;
unsigned n_ioc;
struct spi_ioc_transfer *ioc;
/* Check type and command number */
if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
return -ENOTTY;
/* Check access direction once here; don't repeat below.
* IOC_DIR is from the user perspective, while access_ok is
* from the kernel perspective; so they look reversed.
*/
if (_IOC_DIR(cmd) & _IOC_READ)
err = !access_ok(VERIFY_WRITE,
(void __user *)arg, _IOC_SIZE(cmd));
if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
err = !access_ok(VERIFY_READ,
(void __user *)arg, _IOC_SIZE(cmd));
if (err)
return -EFAULT;
/* guard against device removal before, or while,
* we issue this ioctl.
*/
spidev = filp->private_data;
spin_lock_irq(&spidev->spi_lock);
spi = spi_dev_get(spidev->spi);
spin_unlock_irq(&spidev->spi_lock);
if (spi == NULL)
return -ESHUTDOWN;
/* use the buffer lock here for triple duty:
* - prevent I/O (from us) so calling spi_setup() is safe;
* - prevent concurrent SPI_IOC_WR_* from morphing
* data fields while SPI_IOC_RD_* reads them;
* - SPI_IOC_MESSAGE needs the buffer locked "normally".
*/
mutex_lock(&spidev->buf_lock);
switch (cmd) {
/* read requests */
case SPI_IOC_RD_MODE:
retval = __put_user(spi->mode & SPI_MODE_MASK,
(__u8 __user *)arg);
break;
case SPI_IOC_RD_LSB_FIRST:
retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
(__u8 __user *)arg);
break;
case SPI_IOC_RD_BITS_PER_WORD:
retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
break;
case SPI_IOC_RD_MAX_SPEED_HZ:
retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg);
break;
/* write requests */
case SPI_IOC_WR_MODE:
retval = __get_user(tmp, (u8 __user *)arg);
if (retval == 0) {
u8 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
retval = -EINVAL;
break;
}
tmp |= spi->mode & ~SPI_MODE_MASK;
spi->mode = (u8)tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
}
break;
case SPI_IOC_WR_LSB_FIRST:
retval = __get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
u8 save = spi->mode;
if (tmp)
spi->mode |= SPI_LSB_FIRST;
else
spi->mode &= ~SPI_LSB_FIRST;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
dev_dbg(&spi->dev, "%csb first\n",
tmp ? 'l' : 'm');
}
break;
case SPI_IOC_WR_BITS_PER_WORD:
retval = __get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
u8 save = spi->bits_per_word;
spi->bits_per_word = tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->bits_per_word = save;
else
dev_dbg(&spi->dev, "%d bits per word\n", tmp);
}
break;
case SPI_IOC_WR_MAX_SPEED_HZ:
retval = __get_user(tmp, (__u32 __user *)arg);
if (retval == 0) {
u32 save = spi->max_speed_hz;
spi->max_speed_hz = tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->max_speed_hz = save;
else
dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
}
break;
default:
/* segmented and/or full-duplex I/O request */
if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
|| _IOC_DIR(cmd) != _IOC_WRITE) {
retval = -ENOTTY;
break;
}
tmp = _IOC_SIZE(cmd);
if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
retval = -EINVAL;
break;
}
n_ioc = tmp / sizeof(struct spi_ioc_transfer);
if (n_ioc == 0)
break;
/* copy into scratch area */
ioc = kmalloc(tmp, GFP_KERNEL);
if (!ioc) {
retval = -ENOMEM;
break;
}
if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
kfree(ioc);
retval = -EFAULT;
break;
}
/* translate to spi_message, execute */
retval = spidev_message(spidev, ioc, n_ioc);
kfree(ioc);
break;
}
mutex_unlock(&spidev->buf_lock);
spi_dev_put(spi);
return retval;
}
#ifdef CONFIG_COMPAT
static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */
static int spidev_open(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
int status = -ENXIO;
mutex_lock(&device_list_lock);
list_for_each_entry(spidev, &device_list, device_entry) {
if (spidev->devt == inode->i_rdev) {
status = 0;
break;
}
}
if (status == 0) {
if (!spidev->buffer) {
spidev->buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->buffer) {
dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
}
}
if (!spidev->bufferrx) {
spidev->bufferrx = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->bufferrx) {
dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
kfree(spidev->buffer);
spidev->buffer = NULL;
status = -ENOMEM;
}
}
if (status == 0) {
spidev->users++;
filp->private_data = spidev;
nonseekable_open(inode, filp);
}
} else
pr_debug("spidev: nothing for minor %d\n", iminor(inode));
mutex_unlock(&device_list_lock);
return status;
}
static int spidev_release(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
int status = 0;
mutex_lock(&device_list_lock);
spidev = filp->private_data;
filp->private_data = NULL;
/* last close? */
spidev->users--;
if (!spidev->users) {
int dofree;
kfree(spidev->buffer);
spidev->buffer = NULL;
kfree(spidev->bufferrx);
spidev->bufferrx = NULL;
/* ... after we unbound from the underlying device? */
spin_lock_irq(&spidev->spi_lock);
dofree = (spidev->spi == NULL);
spin_unlock_irq(&spidev->spi_lock);
if (dofree)
kfree(spidev);
}
mutex_unlock(&device_list_lock);
return status;
}
static const struct file_operations spidev_fops = {
.owner = THIS_MODULE,
/* REVISIT switch to aio primitives, so that userspace
* gets more complete API coverage. It'll simplify things
* too, except for the locking.
*/
.write = spidev_write,
.read = spidev_read,
.unlocked_ioctl = spidev_ioctl,
.compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
.llseek = no_llseek,
};
/*-------------------------------------------------------------------------*/
/* The main reason to have this class is to make mdev/udev create the
* /dev/spidevB.C character device nodes exposing our userspace API.
* It also simplifies memory management.
*/
static struct class *spidev_class;
/*-------------------------------------------------------------------------*/
static int __devinit spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
unsigned long minor;
/* Allocate driver data */
spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
if (!spidev)
return -ENOMEM;
/* Initialize the driver data */
spidev->spi = spi;
spin_lock_init(&spidev->spi_lock);
mutex_init(&spidev->buf_lock);
INIT_LIST_HEAD(&spidev->device_entry);
/* If we can allocate a minor number, hook up this device.
* Reusing minors is fine so long as udev or mdev is working.
*/
mutex_lock(&device_list_lock);
minor = find_first_zero_bit(minors, N_SPI_MINORS);
if (minor < N_SPI_MINORS) {
struct device *dev;
spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
dev = device_create(spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
spi->master->bus_num, spi->chip_select);
status = IS_ERR(dev) ? PTR_ERR(dev) : 0;
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
status = -ENODEV;
}
if (status == 0) {
set_bit(minor, minors);
list_add(&spidev->device_entry, &device_list);
}
mutex_unlock(&device_list_lock);
if (status == 0)
spi_set_drvdata(spi, spidev);
else
kfree(spidev);
return status;
}
static int __devexit spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
/* make sure ops on existing fds can abort cleanly */
spin_lock_irq(&spidev->spi_lock);
spidev->spi = NULL;
spi_set_drvdata(spi, NULL);
spin_unlock_irq(&spidev->spi_lock);
/* prevent new opens */
mutex_lock(&device_list_lock);
list_del(&spidev->device_entry);
device_destroy(spidev_class, spidev->devt);
clear_bit(MINOR(spidev->devt), minors);
if (spidev->users == 0)
kfree(spidev);
mutex_unlock(&device_list_lock);
return 0;
}
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.owner = THIS_MODULE,
},
.probe = spidev_probe,
.remove = __devexit_p(spidev_remove),
/* NOTE: suspend/resume methods are not necessary here.
* We don't do anything except pass the requests to/from
* the underlying controller. The refrigerator handles
* most issues; the controller driver handles the rest.
*/
};
/*-------------------------------------------------------------------------*/
static int __init spidev_init(void)
{
int status;
/* Claim our 256 reserved device numbers. Then register a class
* that will key udev/mdev to add/remove /dev nodes. Last, register
* the driver which manages those device numbers.
*/
BUILD_BUG_ON(N_SPI_MINORS > 256);
status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
if (status < 0)
return status;
spidev_class = class_create(THIS_MODULE, "spidev");
if (IS_ERR(spidev_class)) {
status = PTR_ERR(spidev_class);
goto error_class;
}
status = spi_register_driver(&spidev_spi_driver);
if (status < 0)
goto error_register;
if (busnum != -1 && chipselect != -1) {
struct spi_board_info chip = {
.modalias = "spidev",
.mode = spimode,
.bus_num = busnum,
.chip_select = chipselect,
.max_speed_hz = maxspeed,
};
struct spi_master *master;
master = spi_busnum_to_master(busnum);
if (!master) {
status = -ENODEV;
goto error_busnum;
}
/* We create a virtual device that will sit on the bus */
spi = spi_new_device(master, &chip);
if (!spi) {
status = -EBUSY;
goto error_mem;
}
dev_dbg(&spi->dev, "busnum=%d cs=%d bufsiz=%d maxspeed=%d",
busnum, chipselect, bufsiz, maxspeed);
}
return 0;
error_mem:
error_busnum:
spi_unregister_driver(&spidev_spi_driver);
error_register:
class_destroy(spidev_class);
error_class:
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
return status;
}
module_init(spidev_init);
static void __exit spidev_exit(void)
{
if (spi) {
spi_unregister_device(spi);
spi = NULL;
}
spi_unregister_driver(&spidev_spi_driver);
class_destroy(spidev_class);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);
MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");
| gpl-2.0 |
KangDroid/android_kernel_lge_hammerhead | sound/soc/omap/zoom2.c | 4938 | 5957 | /*
* zoom2.c -- SoC audio for Zoom2
*
* Author: Misael Lopez Cruz <x0052729@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/gpio.h>
#include <mach/board-zoom.h>
#include <plat/mcbsp.h>
/* Register descriptions for twl4030 codec part */
#include <linux/mfd/twl4030-audio.h>
#include <linux/module.h>
#include "omap-mcbsp.h"
#include "omap-pcm.h"
#define ZOOM2_HEADSET_MUX_GPIO (OMAP_MAX_GPIO_LINES + 15)
static int zoom2_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret;
/* Set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
SND_SOC_CLOCK_IN);
if (ret < 0) {
printk(KERN_ERR "can't set codec system clock\n");
return ret;
}
return 0;
}
static struct snd_soc_ops zoom2_ops = {
.hw_params = zoom2_hw_params,
};
/* Zoom2 machine DAPM */
static const struct snd_soc_dapm_widget zoom2_twl4030_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Ext Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_HP("Headset Stereophone", NULL),
SND_SOC_DAPM_LINE("Aux In", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
/* External Mics: MAINMIC, SUBMIC with bias*/
{"MAINMIC", NULL, "Mic Bias 1"},
{"SUBMIC", NULL, "Mic Bias 2"},
{"Mic Bias 1", NULL, "Ext Mic"},
{"Mic Bias 2", NULL, "Ext Mic"},
/* External Speakers: HFL, HFR */
{"Ext Spk", NULL, "HFL"},
{"Ext Spk", NULL, "HFR"},
/* Headset Stereophone: HSOL, HSOR */
{"Headset Stereophone", NULL, "HSOL"},
{"Headset Stereophone", NULL, "HSOR"},
/* Headset Mic: HSMIC with bias */
{"HSMIC", NULL, "Headset Mic Bias"},
{"Headset Mic Bias", NULL, "Headset Mic"},
/* Aux In: AUXL, AUXR */
{"Aux In", NULL, "AUXL"},
{"Aux In", NULL, "AUXR"},
};
static int zoom2_twl4030_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
/* TWL4030 not connected pins */
snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
snd_soc_dapm_nc_pin(dapm, "EARPIECE");
snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
snd_soc_dapm_nc_pin(dapm, "CARKITL");
snd_soc_dapm_nc_pin(dapm, "CARKITR");
return 0;
}
static int zoom2_twl4030_voice_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
unsigned short reg;
/* Enable voice interface */
reg = codec->driver->read(codec, TWL4030_REG_VOICE_IF);
reg |= TWL4030_VIF_DIN_EN | TWL4030_VIF_DOUT_EN | TWL4030_VIF_EN;
codec->driver->write(codec, TWL4030_REG_VOICE_IF, reg);
return 0;
}
/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link zoom2_dai[] = {
{
.name = "TWL4030 I2S",
.stream_name = "TWL4030 Audio",
.cpu_dai_name = "omap-mcbsp.2",
.codec_dai_name = "twl4030-hifi",
.platform_name = "omap-pcm-audio",
.codec_name = "twl4030-codec",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
.init = zoom2_twl4030_init,
.ops = &zoom2_ops,
},
{
.name = "TWL4030 PCM",
.stream_name = "TWL4030 Voice",
.cpu_dai_name = "omap-mcbsp.3",
.codec_dai_name = "twl4030-voice",
.platform_name = "omap-pcm-audio",
.codec_name = "twl4030-codec",
.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM,
.init = zoom2_twl4030_voice_init,
.ops = &zoom2_ops,
},
};
/* Audio machine driver */
static struct snd_soc_card snd_soc_zoom2 = {
.name = "Zoom2",
.owner = THIS_MODULE,
.dai_link = zoom2_dai,
.num_links = ARRAY_SIZE(zoom2_dai),
.dapm_widgets = zoom2_twl4030_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(zoom2_twl4030_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static struct platform_device *zoom2_snd_device;
static int __init zoom2_soc_init(void)
{
int ret;
if (!machine_is_omap_zoom2())
return -ENODEV;
printk(KERN_INFO "Zoom2 SoC init\n");
zoom2_snd_device = platform_device_alloc("soc-audio", -1);
if (!zoom2_snd_device) {
printk(KERN_ERR "Platform device allocation failed\n");
return -ENOMEM;
}
platform_set_drvdata(zoom2_snd_device, &snd_soc_zoom2);
ret = platform_device_add(zoom2_snd_device);
if (ret)
goto err1;
BUG_ON(gpio_request(ZOOM2_HEADSET_MUX_GPIO, "hs_mux") < 0);
gpio_direction_output(ZOOM2_HEADSET_MUX_GPIO, 0);
BUG_ON(gpio_request(ZOOM2_HEADSET_EXTMUTE_GPIO, "ext_mute") < 0);
gpio_direction_output(ZOOM2_HEADSET_EXTMUTE_GPIO, 0);
return 0;
err1:
printk(KERN_ERR "Unable to add platform device\n");
platform_device_put(zoom2_snd_device);
return ret;
}
module_init(zoom2_soc_init);
static void __exit zoom2_soc_exit(void)
{
gpio_free(ZOOM2_HEADSET_MUX_GPIO);
gpio_free(ZOOM2_HEADSET_EXTMUTE_GPIO);
platform_device_unregister(zoom2_snd_device);
}
module_exit(zoom2_soc_exit);
MODULE_AUTHOR("Misael Lopez Cruz <x0052729@ti.com>");
MODULE_DESCRIPTION("ALSA SoC Zoom2");
MODULE_LICENSE("GPL");
| gpl-2.0 |
klquicksall/Mako-Nexus-4-Stock-4.2.2 | drivers/mmc/host/at91_mci.c | 4938 | 31481 | /*
* linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
*
* Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
*
* Copyright (C) 2006 Malcolm Noyes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
This is the AT91 MCI driver that has been tested with both MMC cards
and SD-cards. Boards that support write protect are now supported.
The CCAT91SBC001 board does not support SD cards.
The three entry points are at91_mci_request, at91_mci_set_ios
and at91_mci_get_ro.
SET IOS
This configures the device to put it into the correct mode and clock speed
required.
MCI REQUEST
MCI request processes the commands sent in the mmc_request structure. This
can consist of a processing command and a stop command in the case of
multiple block transfers.
There are three main types of request, commands, reads and writes.
Commands are straightforward. The command is submitted to the controller and
the request function returns. When the controller generates an interrupt to indicate
the command is finished, the response to the command is read and the mmc_request_done
function is called to end the request.
Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
controller to manage the transfers.
A read is done from the controller directly to the scatterlist passed in from the request.
Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
A write is slightly different in that the bytes to write are first copied from the scatterlist
into a DMA memory buffer (this is in case the source buffer should be read only). The
entire write is then performed from this single DMA memory buffer.
The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
GET RO
Gets the status of the write protect pin, if available.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/gpio.h>
#include <mach/board.h>
#include <mach/cpu.h>
#include "at91_mci.h"
#define DRIVER_NAME "at91_mci"
static inline int at91mci_is_mci1rev2xx(void)
{
return ( cpu_is_at91sam9260()
|| cpu_is_at91sam9263()
|| cpu_is_at91sam9rl()
|| cpu_is_at91sam9g10()
|| cpu_is_at91sam9g20()
);
}
#define FL_SENT_COMMAND (1 << 0)
#define FL_SENT_STOP (1 << 1)
#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
#define MCI_BLKSIZE 512
#define MCI_MAXBLKSIZE 4095
#define MCI_BLKATONCE 256
#define MCI_BUFSIZE (MCI_BLKSIZE * MCI_BLKATONCE)
/*
* Low level type for this driver
*/
struct at91mci_host
{
struct mmc_host *mmc;
struct mmc_command *cmd;
struct mmc_request *request;
void __iomem *baseaddr;
int irq;
struct at91_mmc_data *board;
int present;
struct clk *mci_clk;
/*
* Flag indicating when the command has been sent. This is used to
* work out whether or not to send the stop
*/
unsigned int flags;
/* flag for current bus settings */
u32 bus_mode;
/* DMA buffer used for transmitting */
unsigned int* buffer;
dma_addr_t physical_address;
unsigned int total_length;
/* Latest in the scatterlist that has been enabled for transfer, but not freed */
int in_use_index;
/* Latest in the scatterlist that has been enabled for transfer */
int transfer_index;
/* Timer for timeouts */
struct timer_list timer;
};
/*
* Reset the controller and restore most of the state
*/
static void at91_reset_host(struct at91mci_host *host)
{
unsigned long flags;
u32 mr;
u32 sdcr;
u32 dtor;
u32 imr;
local_irq_save(flags);
imr = at91_mci_read(host, AT91_MCI_IMR);
at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
/* save current state */
mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
sdcr = at91_mci_read(host, AT91_MCI_SDCR);
dtor = at91_mci_read(host, AT91_MCI_DTOR);
/* reset the controller */
at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
/* restore state */
at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
at91_mci_write(host, AT91_MCI_MR, mr);
at91_mci_write(host, AT91_MCI_SDCR, sdcr);
at91_mci_write(host, AT91_MCI_DTOR, dtor);
at91_mci_write(host, AT91_MCI_IER, imr);
/* make sure sdio interrupts will fire */
at91_mci_read(host, AT91_MCI_SR);
local_irq_restore(flags);
}
static void at91_timeout_timer(unsigned long data)
{
struct at91mci_host *host;
host = (struct at91mci_host *)data;
if (host->request) {
dev_err(host->mmc->parent, "Timeout waiting end of packet\n");
if (host->cmd && host->cmd->data) {
host->cmd->data->error = -ETIMEDOUT;
} else {
if (host->cmd)
host->cmd->error = -ETIMEDOUT;
else
host->request->cmd->error = -ETIMEDOUT;
}
at91_reset_host(host);
mmc_request_done(host->mmc, host->request);
}
}
/*
* Copy from sg to a dma block - used for transfers
*/
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
unsigned int len, i, size;
unsigned *dmabuf = host->buffer;
size = data->blksz * data->blocks;
len = data->sg_len;
/* MCI1 rev2xx Data Write Operation and number of bytes erratum */
if (at91mci_is_mci1rev2xx())
if (host->total_length == 12)
memset(dmabuf, 0, 12);
/*
* Just loop through all entries. Size might not
* be the entire list though so make sure that
* we do not transfer too much.
*/
for (i = 0; i < len; i++) {
struct scatterlist *sg;
int amount;
unsigned int *sgbuffer;
sg = &data->sg[i];
sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset;
amount = min(size, sg->length);
size -= amount;
if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
int index;
for (index = 0; index < (amount / 4); index++)
*dmabuf++ = swab32(sgbuffer[index]);
} else {
char *tmpv = (char *)dmabuf;
memcpy(tmpv, sgbuffer, amount);
tmpv += amount;
dmabuf = (unsigned *)tmpv;
}
kunmap_atomic(sgbuffer);
if (size == 0)
break;
}
/*
* Check that we didn't get a request to transfer
* more data than can fit into the SG list.
*/
BUG_ON(size != 0);
}
/*
* Handle after a dma read
*/
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
struct mmc_command *cmd;
struct mmc_data *data;
unsigned int len, i, size;
unsigned *dmabuf = host->buffer;
pr_debug("post dma read\n");
cmd = host->cmd;
if (!cmd) {
pr_debug("no command\n");
return;
}
data = cmd->data;
if (!data) {
pr_debug("no data\n");
return;
}
size = data->blksz * data->blocks;
len = data->sg_len;
at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
for (i = 0; i < len; i++) {
struct scatterlist *sg;
int amount;
unsigned int *sgbuffer;
sg = &data->sg[i];
sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset;
amount = min(size, sg->length);
size -= amount;
if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
int index;
for (index = 0; index < (amount / 4); index++)
sgbuffer[index] = swab32(*dmabuf++);
} else {
char *tmpv = (char *)dmabuf;
memcpy(sgbuffer, tmpv, amount);
tmpv += amount;
dmabuf = (unsigned *)tmpv;
}
flush_kernel_dcache_page(sg_page(sg));
kunmap_atomic(sgbuffer);
data->bytes_xfered += amount;
if (size == 0)
break;
}
pr_debug("post dma read done\n");
}
/*
* Handle transmitted data
*/
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
struct mmc_command *cmd;
struct mmc_data *data;
pr_debug("Handling the transmit\n");
/* Disable the transfer */
at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
/* Now wait for cmd ready */
at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
cmd = host->cmd;
if (!cmd) return;
data = cmd->data;
if (!data) return;
if (cmd->data->blocks > 1) {
pr_debug("multiple write : wait for BLKE...\n");
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
} else
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
}
/*
* Update bytes transferred count during a write operation
*/
static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
{
struct mmc_data *data;
/* always deal with the effective request (and not the current cmd) */
if (host->request->cmd && host->request->cmd->error != 0)
return;
if (host->request->data) {
data = host->request->data;
if (data->flags & MMC_DATA_WRITE) {
/* card is in IDLE mode now */
pr_debug("-> bytes_xfered %d, total_length = %d\n",
data->bytes_xfered, host->total_length);
data->bytes_xfered = data->blksz * data->blocks;
}
}
}
/* Handle the CMDRDY (command ready) case after a command has been sent */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
if (!host->cmd)
return 1;
else if (!host->cmd->data) {
if (host->flags & FL_SENT_STOP) {
/* After a multi-block write, we must wait for NOTBUSY */
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
} else return 1;
} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
/* After sending a multi-block-write command, start the DMA transfer */
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
}
/* command not completed, have to wait */
return 0;
}
/*
* Enable the controller
*/
static void at91_mci_enable(struct at91mci_host *host)
{
unsigned int mr;
at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
mr = AT91_MCI_PDCMODE | 0x34a;
if (at91mci_is_mci1rev2xx())
mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
at91_mci_write(host, AT91_MCI_MR, mr);
/* use Slot A or B (only one at same time) */
at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}
/*
* Disable the controller
*/
static void at91_mci_disable(struct at91mci_host *host)
{
at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
/*
* Send a command
*/
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
unsigned int cmdr, mr;
unsigned int block_length;
struct mmc_data *data = cmd->data;
unsigned int blocks;
unsigned int ier = 0;
host->cmd = cmd;
/* Needed for leaving busy state before CMD1 */
if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
pr_debug("Clearing timeout\n");
at91_mci_write(host, AT91_MCI_ARGR, 0);
at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
/* spin */
pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
}
}
cmdr = cmd->opcode;
if (mmc_resp_type(cmd) == MMC_RSP_NONE)
cmdr |= AT91_MCI_RSPTYP_NONE;
else {
/* if a response is expected then allow maximum response latency */
cmdr |= AT91_MCI_MAXLAT;
/* set 136 bit response for R2, 48 bit response otherwise */
if (mmc_resp_type(cmd) == MMC_RSP_R2)
cmdr |= AT91_MCI_RSPTYP_136;
else
cmdr |= AT91_MCI_RSPTYP_48;
}
if (data) {
if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
if (data->blksz & 0x3) {
pr_debug("Unsupported block size\n");
cmd->error = -EINVAL;
mmc_request_done(host->mmc, host->request);
return;
}
if (data->flags & MMC_DATA_STREAM) {
pr_debug("Stream commands not supported\n");
cmd->error = -EINVAL;
mmc_request_done(host->mmc, host->request);
return;
}
}
block_length = data->blksz;
blocks = data->blocks;
/* always set data start - also set direction flag for read */
if (data->flags & MMC_DATA_READ)
cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
else if (data->flags & MMC_DATA_WRITE)
cmdr |= AT91_MCI_TRCMD_START;
if (cmd->opcode == SD_IO_RW_EXTENDED) {
cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK;
} else {
if (data->flags & MMC_DATA_STREAM)
cmdr |= AT91_MCI_TRTYP_STREAM;
if (data->blocks > 1)
cmdr |= AT91_MCI_TRTYP_MULTIPLE;
}
}
else {
block_length = 0;
blocks = 0;
}
if (host->flags & FL_SENT_STOP)
cmdr |= AT91_MCI_TRCMD_STOP;
if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
cmdr |= AT91_MCI_OPDCMD;
/*
* Set the arguments and send the command
*/
pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
if (!data) {
at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
at91_mci_write(host, ATMEL_PDC_RPR, 0);
at91_mci_write(host, ATMEL_PDC_RCR, 0);
at91_mci_write(host, ATMEL_PDC_RNPR, 0);
at91_mci_write(host, ATMEL_PDC_RNCR, 0);
at91_mci_write(host, ATMEL_PDC_TPR, 0);
at91_mci_write(host, ATMEL_PDC_TCR, 0);
at91_mci_write(host, ATMEL_PDC_TNPR, 0);
at91_mci_write(host, ATMEL_PDC_TNCR, 0);
ier = AT91_MCI_CMDRDY;
} else {
/* zero block length and PDC mode */
mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
mr |= (block_length << 16);
mr |= AT91_MCI_PDCMODE;
at91_mci_write(host, AT91_MCI_MR, mr);
if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
at91_mci_write(host, AT91_MCI_BLKR,
AT91_MCI_BLKR_BCNT(blocks) |
AT91_MCI_BLKR_BLKLEN(block_length));
/*
* Disable the PDC controller
*/
at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
if (cmdr & AT91_MCI_TRCMD_START) {
data->bytes_xfered = 0;
host->transfer_index = 0;
host->in_use_index = 0;
if (cmdr & AT91_MCI_TRDIR) {
/*
* Handle a read
*/
host->total_length = 0;
at91_mci_write(host, ATMEL_PDC_RPR, host->physical_address);
at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ?
(blocks * block_length) : (blocks * block_length) / 4);
at91_mci_write(host, ATMEL_PDC_RNPR, 0);
at91_mci_write(host, ATMEL_PDC_RNCR, 0);
ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
}
else {
/*
* Handle a write
*/
host->total_length = block_length * blocks;
/*
* MCI1 rev2xx Data Write Operation and
* number of bytes erratum
*/
if (at91mci_is_mci1rev2xx())
if (host->total_length < 12)
host->total_length = 12;
at91_mci_sg_to_dma(host, data);
pr_debug("Transmitting %d bytes\n", host->total_length);
at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
host->total_length : host->total_length / 4);
ier = AT91_MCI_CMDRDY;
}
}
}
/*
* Send the command and then enable the PDC - not the other way round as
* the data sheet says
*/
at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
at91_mci_write(host, AT91_MCI_CMDR, cmdr);
if (cmdr & AT91_MCI_TRCMD_START) {
if (cmdr & AT91_MCI_TRDIR)
at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
}
/* Enable selected interrupts */
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}
/*
* Process the next step in the request
*/
static void at91_mci_process_next(struct at91mci_host *host)
{
if (!(host->flags & FL_SENT_COMMAND)) {
host->flags |= FL_SENT_COMMAND;
at91_mci_send_command(host, host->request->cmd);
}
else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
host->flags |= FL_SENT_STOP;
at91_mci_send_command(host, host->request->stop);
} else {
del_timer(&host->timer);
/* the at91rm9200 mci controller hangs after some transfers,
* and the workaround is to reset it after each transfer.
*/
if (cpu_is_at91rm9200())
at91_reset_host(host);
mmc_request_done(host->mmc, host->request);
}
}
/*
* Handle a command that has been completed
*/
static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
{
struct mmc_command *cmd = host->cmd;
struct mmc_data *data = cmd->data;
at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
status, at91_mci_read(host, AT91_MCI_SR),
cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
if (status & AT91_MCI_ERRORS) {
if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
cmd->error = 0;
}
else {
if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
if (data) {
if (status & AT91_MCI_DTOE)
data->error = -ETIMEDOUT;
else if (status & AT91_MCI_DCRCE)
data->error = -EILSEQ;
}
} else {
if (status & AT91_MCI_RTOE)
cmd->error = -ETIMEDOUT;
else if (status & AT91_MCI_RCRCE)
cmd->error = -EILSEQ;
else
cmd->error = -EIO;
}
pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
cmd->error, data ? data->error : 0,
cmd->opcode, cmd->retries);
}
}
else
cmd->error = 0;
at91_mci_process_next(host);
}
/*
* Handle an MMC request
*/
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct at91mci_host *host = mmc_priv(mmc);
host->request = mrq;
host->flags = 0;
/* more than 1s timeout needed with slow SD cards */
mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
at91_mci_process_next(host);
}
/*
* Set the IOS
*/
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
int clkdiv;
struct at91mci_host *host = mmc_priv(mmc);
unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
host->bus_mode = ios->bus_mode;
if (ios->clock == 0) {
/* Disable the MCI controller */
at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
clkdiv = 0;
}
else {
/* Enable the MCI controller */
at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
if ((at91_master_clock % (ios->clock * 2)) == 0)
clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
else
clkdiv = (at91_master_clock / ios->clock) / 2;
pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
at91_master_clock / (2 * (clkdiv + 1)));
}
if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
pr_debug("MMC: Setting controller bus width to 4\n");
at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
}
else {
pr_debug("MMC: Setting controller bus width to 1\n");
at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
}
/* Set the clock divider */
at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
/* maybe switch power to the card */
if (gpio_is_valid(host->board->vcc_pin)) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
gpio_set_value(host->board->vcc_pin, 0);
break;
case MMC_POWER_UP:
gpio_set_value(host->board->vcc_pin, 1);
break;
case MMC_POWER_ON:
break;
default:
WARN_ON(1);
}
}
}
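/*
 * Illustrative sketch only (added for exposition, not part of the original
 * driver): the divider rule applied in at91_mci_set_ios() above,
 * MCCK = MCK / (2 * (CLKDIV + 1)), written out as a stand-alone helper.
 * The helper name and the example figures are assumptions.
 */
static inline int __maybe_unused at91_mci_example_clkdiv(unsigned long mck,
							 unsigned long target)
{
	/* e.g. mck = 60 MHz, target = 25 MHz -> clkdiv = 1, MCCK = 15 MHz */
	if ((mck % (target * 2)) == 0)
		return (mck / target) / 2 - 1;
	return (mck / target) / 2;
}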
/*
* Handle an interrupt
*/
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
struct at91mci_host *host = devid;
int completed = 0;
unsigned int int_status, int_mask;
int_status = at91_mci_read(host, AT91_MCI_SR);
int_mask = at91_mci_read(host, AT91_MCI_IMR);
pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
int_status & int_mask);
int_status = int_status & int_mask;
if (int_status & AT91_MCI_ERRORS) {
completed = 1;
if (int_status & AT91_MCI_UNRE)
pr_debug("MMC: Underrun error\n");
if (int_status & AT91_MCI_OVRE)
pr_debug("MMC: Overrun error\n");
if (int_status & AT91_MCI_DTOE)
pr_debug("MMC: Data timeout\n");
if (int_status & AT91_MCI_DCRCE)
pr_debug("MMC: CRC error in data\n");
if (int_status & AT91_MCI_RTOE)
pr_debug("MMC: Response timeout\n");
if (int_status & AT91_MCI_RENDE)
pr_debug("MMC: Response end bit error\n");
if (int_status & AT91_MCI_RCRCE)
pr_debug("MMC: Response CRC error\n");
if (int_status & AT91_MCI_RDIRE)
pr_debug("MMC: Response direction error\n");
if (int_status & AT91_MCI_RINDE)
pr_debug("MMC: Response index error\n");
} else {
/* Only continue processing if no errors */
if (int_status & AT91_MCI_TXBUFE) {
pr_debug("TX buffer empty\n");
at91_mci_handle_transmitted(host);
}
if (int_status & AT91_MCI_ENDRX) {
pr_debug("ENDRX\n");
at91_mci_post_dma_read(host);
}
if (int_status & AT91_MCI_RXBUFF) {
pr_debug("RX buffer full\n");
at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
completed = 1;
}
if (int_status & AT91_MCI_ENDTX)
pr_debug("Transmit has ended\n");
if (int_status & AT91_MCI_NOTBUSY) {
pr_debug("Card is ready\n");
at91_mci_update_bytes_xfered(host);
completed = 1;
}
if (int_status & AT91_MCI_DTIP)
pr_debug("Data transfer in progress\n");
if (int_status & AT91_MCI_BLKE) {
pr_debug("Block transfer has ended\n");
if (host->request->data && host->request->data->blocks > 1) {
/* multi block write : complete multi write
* command and send stop */
completed = 1;
} else {
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
}
}
if (int_status & AT91_MCI_SDIOIRQA)
mmc_signal_sdio_irq(host->mmc);
if (int_status & AT91_MCI_SDIOIRQB)
mmc_signal_sdio_irq(host->mmc);
if (int_status & AT91_MCI_TXRDY)
pr_debug("Ready to transmit\n");
if (int_status & AT91_MCI_RXRDY)
pr_debug("Ready to receive\n");
if (int_status & AT91_MCI_CMDRDY) {
pr_debug("Command ready\n");
completed = at91_mci_handle_cmdrdy(host);
}
}
if (completed) {
pr_debug("Completed command\n");
at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
at91_mci_completed_command(host, int_status);
} else
at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
return IRQ_HANDLED;
}
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
struct at91mci_host *host = _host;
int present;
/* entering this ISR means that we have configured det_pin:
* we can use its value from the board structure */
present = !gpio_get_value(host->board->det_pin);
/*
* we expect this irq on both insert and remove,
* and use a short delay to debounce.
*/
if (present != host->present) {
host->present = present;
pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
present ? "insert" : "remove");
if (!present) {
pr_debug("****** Resetting SD-card bus width ******\n");
at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
}
/* 0.5s needed because of early card detect switch firing */
mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
return IRQ_HANDLED;
}
static int at91_mci_get_ro(struct mmc_host *mmc)
{
struct at91mci_host *host = mmc_priv(mmc);
if (gpio_is_valid(host->board->wp_pin))
return !!gpio_get_value(host->board->wp_pin);
/*
* Board doesn't support read only detection; let the mmc core
* decide what to do.
*/
return -ENOSYS;
}
static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct at91mci_host *host = mmc_priv(mmc);
pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
host->board->slot_b ? 'B':'A', enable ? "enable" : "disable");
at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
}
static const struct mmc_host_ops at91_mci_ops = {
.request = at91_mci_request,
.set_ios = at91_mci_set_ios,
.get_ro = at91_mci_get_ro,
.enable_sdio_irq = at91_mci_enable_sdio_irq,
};
/*
* Probe for the device
*/
static int __init at91_mci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct at91mci_host *host;
struct resource *res;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME))
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
goto fail6;
}
mmc->ops = &at91_mci_ops;
mmc->f_min = 375000;
mmc->f_max = 25000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = 0;
mmc->max_blk_size = MCI_MAXBLKSIZE;
mmc->max_blk_count = MCI_BLKATONCE;
mmc->max_req_size = MCI_BUFSIZE;
mmc->max_segs = MCI_BLKATONCE;
mmc->max_seg_size = MCI_BUFSIZE;
host = mmc_priv(mmc);
host->mmc = mmc;
host->bus_mode = 0;
host->board = pdev->dev.platform_data;
if (host->board->wire4) {
if (at91mci_is_mci1rev2xx())
mmc->caps |= MMC_CAP_4_BIT_DATA;
else
dev_warn(&pdev->dev, "4 wire bus mode not supported"
" - using 1 wire\n");
}
host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE,
&host->physical_address, GFP_KERNEL);
if (!host->buffer) {
ret = -ENOMEM;
dev_err(&pdev->dev, "Can't allocate transmit buffer\n");
goto fail5;
}
/* Add SDIO capability when available */
if (at91mci_is_mci1rev2xx()) {
/* at91mci MCI1 rev2xx sdio interrupt erratum */
if (host->board->wire4 || !host->board->slot_b)
mmc->caps |= MMC_CAP_SDIO_IRQ;
}
/*
* Reserve GPIOs ... board init code makes sure these pins are set
* up as GPIOs with the right direction (input, except for vcc)
*/
if (gpio_is_valid(host->board->det_pin)) {
ret = gpio_request(host->board->det_pin, "mmc_detect");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
goto fail4b;
}
}
if (gpio_is_valid(host->board->wp_pin)) {
ret = gpio_request(host->board->wp_pin, "mmc_wp");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
goto fail4;
}
}
if (gpio_is_valid(host->board->vcc_pin)) {
ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
goto fail3;
}
}
/*
* Get Clock
*/
host->mci_clk = clk_get(&pdev->dev, "mci_clk");
if (IS_ERR(host->mci_clk)) {
ret = -ENODEV;
dev_dbg(&pdev->dev, "no mci_clk?\n");
goto fail2;
}
/*
* Map I/O region
*/
host->baseaddr = ioremap(res->start, resource_size(res));
if (!host->baseaddr) {
ret = -ENOMEM;
goto fail1;
}
/*
* Reset hardware
*/
clk_enable(host->mci_clk); /* Enable the peripheral clock */
at91_mci_disable(host);
at91_mci_enable(host);
/*
* Allocate the MCI interrupt
*/
host->irq = platform_get_irq(pdev, 0);
ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
goto fail0;
}
setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);
platform_set_drvdata(pdev, mmc);
/*
* Add host to MMC layer
*/
if (gpio_is_valid(host->board->det_pin)) {
host->present = !gpio_get_value(host->board->det_pin);
}
else
host->present = -1;
mmc_add_host(mmc);
/*
* monitor card insertion/removal if we can
*/
if (gpio_is_valid(host->board->det_pin)) {
ret = request_irq(gpio_to_irq(host->board->det_pin),
at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
if (ret)
dev_warn(&pdev->dev, "request MMC detect irq failed\n");
else
device_init_wakeup(&pdev->dev, 1);
}
pr_debug("Added MCI driver\n");
return 0;
fail0:
clk_disable(host->mci_clk);
iounmap(host->baseaddr);
fail1:
clk_put(host->mci_clk);
fail2:
if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
fail3:
if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
fail4:
if (gpio_is_valid(host->board->det_pin))
gpio_free(host->board->det_pin);
fail4b:
if (host->buffer)
dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
host->buffer, host->physical_address);
fail5:
mmc_free_host(mmc);
fail6:
release_mem_region(res->start, resource_size(res));
dev_err(&pdev->dev, "probe failed, err %d\n", ret);
return ret;
}
/*
* Remove a device
*/
static int __exit at91_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct at91mci_host *host;
struct resource *res;
if (!mmc)
return -1;
host = mmc_priv(mmc);
if (host->buffer)
dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
host->buffer, host->physical_address);
if (gpio_is_valid(host->board->det_pin)) {
if (device_can_wakeup(&pdev->dev))
free_irq(gpio_to_irq(host->board->det_pin), host);
device_init_wakeup(&pdev->dev, 0);
gpio_free(host->board->det_pin);
}
at91_mci_disable(host);
del_timer_sync(&host->timer);
mmc_remove_host(mmc);
free_irq(host->irq, host);
clk_disable(host->mci_clk); /* Disable the peripheral clock */
clk_put(host->mci_clk);
if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
iounmap(host->baseaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
mmc_free_host(mmc);
platform_set_drvdata(pdev, NULL);
pr_debug("MCI Removed\n");
return 0;
}
#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
enable_irq_wake(host->board->det_pin);
if (mmc)
ret = mmc_suspend_host(mmc);
return ret;
}
static int at91_mci_resume(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
disable_irq_wake(host->board->det_pin);
if (mmc)
ret = mmc_resume_host(mmc);
return ret;
}
#else
#define at91_mci_suspend NULL
#define at91_mci_resume NULL
#endif
static struct platform_driver at91_mci_driver = {
.remove = __exit_p(at91_mci_remove),
.suspend = at91_mci_suspend,
.resume = at91_mci_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
static int __init at91_mci_init(void)
{
return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}
static void __exit at91_mci_exit(void)
{
platform_driver_unregister(&at91_mci_driver);
}
module_init(at91_mci_init);
module_exit(at91_mci_exit);
MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_mci");
| gpl-2.0 |
GustavoRD78/78Kernel-ZL-230 | kernel/debug/kdb/kdb_bt.c | 4938 | 5251 | /*
* Kernel Debugger Architecture Independent Stack Traceback
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
*/
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kdb.h>
#include <linux/nmi.h>
#include "kdb_private.h"
static void kdb_show_stack(struct task_struct *p, void *addr)
{
int old_lvl = console_loglevel;
console_loglevel = 15;
kdb_trap_printk++;
kdb_set_current_task(p);
if (addr) {
show_stack((struct task_struct *)p, addr);
} else if (kdb_current_regs) {
#ifdef CONFIG_X86
show_stack(p, &kdb_current_regs->sp);
#else
show_stack(p, NULL);
#endif
} else {
show_stack(p, NULL);
}
console_loglevel = old_lvl;
kdb_trap_printk--;
}
/*
* kdb_bt
*
* This function implements the 'bt' command. Print a stack
* traceback.
*
* bt [<address-expression>] (addr-exp is for alternate stacks)
* btp <pid> Kernel stack for <pid>
* btt <address-expression> Kernel stack for task structure at
* <address-expression>
* bta [DRSTCZEUIMA] All useful processes, optionally
* filtered by state
* btc [<cpu>] The current process on one cpu,
* default is all cpus
*
* bt <address-expression> refers to an address on the stack; that location
* is assumed to contain a return address.
*
* btt <address-expression> refers to the address of a struct task.
*
* Inputs:
* argc argument count
* argv argument vector
* Outputs:
* None.
* Returns:
* zero for success, a kdb diagnostic if error
* Locking:
* none.
* Remarks:
* Backtrace works best when the code uses frame pointers. But even
* without frame pointers we should get a reasonable trace.
*
* mds comes in handy when examining the stack to do a manual traceback or
* to get a starting point for bt <address-expression>.
*/
static int
kdb_bt1(struct task_struct *p, unsigned long mask,
int argcount, int btaprompt)
{
char buffer[2];
if (kdb_getarea(buffer[0], (unsigned long)p) ||
kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
return KDB_BADADDR;
if (!kdb_task_state(p, mask))
return 0;
kdb_printf("Stack traceback for pid %d\n", p->pid);
kdb_ps1(p);
kdb_show_stack(p, NULL);
if (btaprompt) {
kdb_getstr(buffer, sizeof(buffer),
"Enter <q> to end, <cr> to continue:");
if (buffer[0] == 'q') {
kdb_printf("\n");
return 1;
}
}
touch_nmi_watchdog();
return 0;
}
int
kdb_bt(int argc, const char **argv)
{
int diag;
int argcount = 5;
int btaprompt = 1;
int nextarg;
unsigned long addr;
long offset;
/* Prompt after each proc in bta */
kdbgetintenv("BTAPROMPT", &btaprompt);
if (strcmp(argv[0], "bta") == 0) {
struct task_struct *g, *p;
unsigned long cpu;
unsigned long mask = kdb_task_state_string(argc ? argv[1] :
NULL);
if (argc == 0)
kdb_ps_suppressed();
/* Run the active tasks first */
for_each_online_cpu(cpu) {
p = kdb_curr_task(cpu);
if (kdb_bt1(p, mask, argcount, btaprompt))
return 0;
}
/* Now the inactive tasks */
kdb_do_each_thread(g, p) {
if (task_curr(p))
continue;
if (kdb_bt1(p, mask, argcount, btaprompt))
return 0;
} kdb_while_each_thread(g, p);
} else if (strcmp(argv[0], "btp") == 0) {
struct task_struct *p;
unsigned long pid;
if (argc != 1)
return KDB_ARGCOUNT;
diag = kdbgetularg((char *)argv[1], &pid);
if (diag)
return diag;
p = find_task_by_pid_ns(pid, &init_pid_ns);
if (p) {
kdb_set_current_task(p);
return kdb_bt1(p, ~0UL, argcount, 0);
}
kdb_printf("No process with pid == %ld found\n", pid);
return 0;
} else if (strcmp(argv[0], "btt") == 0) {
if (argc != 1)
return KDB_ARGCOUNT;
diag = kdbgetularg((char *)argv[1], &addr);
if (diag)
return diag;
kdb_set_current_task((struct task_struct *)addr);
return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
} else if (strcmp(argv[0], "btc") == 0) {
unsigned long cpu = ~0;
struct task_struct *save_current_task = kdb_current_task;
char buf[80];
if (argc > 1)
return KDB_ARGCOUNT;
if (argc == 1) {
diag = kdbgetularg((char *)argv[1], &cpu);
if (diag)
return diag;
}
/* Recursive use of kdb_parse, do not use argv after
* this point */
argv = NULL;
if (cpu != ~0) {
if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
kdb_printf("no process for cpu %ld\n", cpu);
return 0;
}
sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
kdb_parse(buf);
return 0;
}
kdb_printf("btc: cpu status: ");
kdb_parse("cpu\n");
for_each_online_cpu(cpu) {
sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
kdb_parse(buf);
touch_nmi_watchdog();
}
kdb_set_current_task(save_current_task);
return 0;
} else {
if (argc) {
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
&offset, NULL);
if (diag)
return diag;
kdb_show_stack(kdb_current_task, (void *)addr);
return 0;
} else {
return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
}
}
/* NOTREACHED */
return 0;
}
| gpl-2.0 |
DerRomtester/android_kernel_oneplus_msm8974 | drivers/infiniband/hw/ehca/ehca_qp.c | 5194 | 64713 | /*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* QP functions
*
* Authors: Joachim Fenkes <fenkes@de.ibm.com>
* Stefan Roscher <stefan.roscher@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
static struct kmem_cache *qp_cache;
/*
* attributes not supported by query qp
*/
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
IB_QP_EN_SQD_ASYNC_NOTIFY)
/*
* ehca (internal) qp state values
*/
enum ehca_qp_state {
EHCA_QPS_RESET = 1,
EHCA_QPS_INIT = 2,
EHCA_QPS_RTR = 3,
EHCA_QPS_RTS = 5,
EHCA_QPS_SQD = 6,
EHCA_QPS_SQE = 8,
EHCA_QPS_ERR = 128
};
/*
* qp state transitions as defined by IB Arch Rel 1.1 page 431
*/
enum ib_qp_statetrans {
IB_QPST_ANY2RESET,
IB_QPST_ANY2ERR,
IB_QPST_RESET2INIT,
IB_QPST_INIT2RTR,
IB_QPST_INIT2INIT,
IB_QPST_RTR2RTS,
IB_QPST_RTS2SQD,
IB_QPST_RTS2RTS,
IB_QPST_SQD2RTS,
IB_QPST_SQE2RTS,
IB_QPST_SQD2SQD,
IB_QPST_MAX /* nr of transitions, this must be last!!! */
};
/*
* ib2ehca_qp_state maps IB to ehca qp_state
* returns ehca qp state corresponding to given ib qp state
*/
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
switch (ib_qp_state) {
case IB_QPS_RESET:
return EHCA_QPS_RESET;
case IB_QPS_INIT:
return EHCA_QPS_INIT;
case IB_QPS_RTR:
return EHCA_QPS_RTR;
case IB_QPS_RTS:
return EHCA_QPS_RTS;
case IB_QPS_SQD:
return EHCA_QPS_SQD;
case IB_QPS_SQE:
return EHCA_QPS_SQE;
case IB_QPS_ERR:
return EHCA_QPS_ERR;
default:
ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
return -EINVAL;
}
}
/*
* ehca2ib_qp_state maps ehca to IB qp_state
* returns ib qp state corresponding to given ehca qp state
*/
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
ehca_qp_state)
{
switch (ehca_qp_state) {
case EHCA_QPS_RESET:
return IB_QPS_RESET;
case EHCA_QPS_INIT:
return IB_QPS_INIT;
case EHCA_QPS_RTR:
return IB_QPS_RTR;
case EHCA_QPS_RTS:
return IB_QPS_RTS;
case EHCA_QPS_SQD:
return IB_QPS_SQD;
case EHCA_QPS_SQE:
return IB_QPS_SQE;
case EHCA_QPS_ERR:
return IB_QPS_ERR;
default:
ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
return -EINVAL;
}
}
/*
* ehca_qp_type used as index for req_attr and opt_attr of
* struct ehca_modqp_statetrans
*/
enum ehca_qp_type {
QPT_RC = 0,
QPT_UC = 1,
QPT_UD = 2,
QPT_SQP = 3,
QPT_MAX
};
/*
* ib2ehcaqptype maps Ib to ehca qp_type
* returns ehca qp type corresponding to ib qp type
*/
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
switch (ibqptype) {
case IB_QPT_SMI:
case IB_QPT_GSI:
return QPT_SQP;
case IB_QPT_RC:
return QPT_RC;
case IB_QPT_UC:
return QPT_UC;
case IB_QPT_UD:
return QPT_UD;
default:
ehca_gen_err("Invalid ibqptype=%x", ibqptype);
return -EINVAL;
}
}
static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
int ib_tostate)
{
int index = -EINVAL;
switch (ib_tostate) {
case IB_QPS_RESET:
index = IB_QPST_ANY2RESET;
break;
case IB_QPS_INIT:
switch (ib_fromstate) {
case IB_QPS_RESET:
index = IB_QPST_RESET2INIT;
break;
case IB_QPS_INIT:
index = IB_QPST_INIT2INIT;
break;
}
break;
case IB_QPS_RTR:
if (ib_fromstate == IB_QPS_INIT)
index = IB_QPST_INIT2RTR;
break;
case IB_QPS_RTS:
switch (ib_fromstate) {
case IB_QPS_RTR:
index = IB_QPST_RTR2RTS;
break;
case IB_QPS_RTS:
index = IB_QPST_RTS2RTS;
break;
case IB_QPS_SQD:
index = IB_QPST_SQD2RTS;
break;
case IB_QPS_SQE:
index = IB_QPST_SQE2RTS;
break;
}
break;
case IB_QPS_SQD:
if (ib_fromstate == IB_QPS_RTS)
index = IB_QPST_RTS2SQD;
break;
case IB_QPS_SQE:
break;
case IB_QPS_ERR:
index = IB_QPST_ANY2ERR;
break;
default:
break;
}
return index;
}
/*
* ibqptype2servicetype returns hcp service type corresponding to given
* ib qp type used by create_qp()
*/
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
switch (ibqptype) {
case IB_QPT_SMI:
case IB_QPT_GSI:
return ST_UD;
case IB_QPT_RC:
return ST_RC;
case IB_QPT_UC:
return ST_UC;
case IB_QPT_UD:
return ST_UD;
case IB_QPT_RAW_IPV6:
return -EINVAL;
case IB_QPT_RAW_ETHERTYPE:
return -EINVAL;
default:
ehca_gen_err("Invalid ibqptype=%x", ibqptype);
return -EINVAL;
}
}
/*
* init userspace queue info from ipz_queue data
*/
static inline void queue2resp(struct ipzu_queue_resp *resp,
struct ipz_queue *queue)
{
resp->qe_size = queue->qe_size;
resp->act_nr_of_sg = queue->act_nr_of_sg;
resp->queue_length = queue->queue_length;
resp->pagesize = queue->pagesize;
resp->toggle_state = queue->toggle_state;
resp->offset = queue->offset;
}
/*
* init_qp_queue initializes/constructs r/squeue and registers queue pages.
*/
static inline int init_qp_queue(struct ehca_shca *shca,
struct ehca_pd *pd,
struct ehca_qp *my_qp,
struct ipz_queue *queue,
int q_type,
u64 expected_hret,
struct ehca_alloc_queue_parms *parms,
int wqe_size)
{
int ret, cnt, ipz_rc, nr_q_pages;
void *vpage;
u64 rpage, h_ret;
struct ib_device *ib_dev = &shca->ib_device;
struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
if (!parms->queue_size)
return 0;
if (parms->is_small) {
nr_q_pages = 1;
ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
128 << parms->page_size,
wqe_size, parms->act_nr_sges, 1);
} else {
nr_q_pages = parms->queue_size;
ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
EHCA_PAGESIZE, wqe_size,
parms->act_nr_sges, 0);
}
if (!ipz_rc) {
ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
ipz_rc);
return -EBUSY;
}
/* register queue pages */
for (cnt = 0; cnt < nr_q_pages; cnt++) {
vpage = ipz_qpageit_get_inc(queue);
if (!vpage) {
ehca_err(ib_dev, "ipz_qpageit_get_inc() "
"failed p_vpage= %p", vpage);
ret = -EINVAL;
goto init_qp_queue1;
}
rpage = virt_to_abs(vpage);
h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
my_qp->ipz_qp_handle,
NULL, 0, q_type,
rpage, parms->is_small ? 0 : 1,
my_qp->galpas.kernel);
if (cnt == (nr_q_pages - 1)) { /* last page! */
if (h_ret != expected_hret) {
ehca_err(ib_dev, "hipz_qp_register_rpage() "
"h_ret=%lli", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queue1;
}
vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
if (vpage) {
ehca_err(ib_dev, "ipz_qpageit_get_inc() "
"should not succeed vpage=%p", vpage);
ret = -EINVAL;
goto init_qp_queue1;
}
} else {
if (h_ret != H_PAGE_REGISTERED) {
ehca_err(ib_dev, "hipz_qp_register_rpage() "
"h_ret=%lli", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queue1;
}
}
}
ipz_qeit_reset(queue);
return 0;
init_qp_queue1:
ipz_queue_dtor(pd, queue);
return ret;
}
static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
if (is_llqp)
return 128 << act_nr_sge;
else
return offsetof(struct ehca_wqe,
u.nud.sg_list[act_nr_sge]);
}
static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
int req_nr_sge, int is_llqp)
{
u32 wqe_size, q_size;
int act_nr_sge = req_nr_sge;
if (!is_llqp)
/* round up #SGEs so WQE size is a power of 2 */
for (act_nr_sge = 4; act_nr_sge <= 252;
act_nr_sge = 4 + 2 * act_nr_sge)
if (act_nr_sge >= req_nr_sge)
break;
wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
q_size = wqe_size * (queue->max_wr + 1);
if (q_size <= 512)
queue->page_size = 2;
else if (q_size <= 1024)
queue->page_size = 3;
else
queue->page_size = 0;
queue->is_small = (queue->page_size != 0);
}
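/*
* Added commentary, worked example: init_qp_queue() above uses a small-queue
* page of (128 << page_size) bytes, so a queue whose total WQE space fits in
* 512 bytes gets page_size 2 (128 << 2), one that fits in 1024 bytes gets
* page_size 3 (128 << 3), and anything larger keeps page_size 0 and falls
* back to full EHCA_PAGESIZE pages with is_small = 0.
*/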
/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
struct list_head *list, *node;
/* TODO: support low latency QPs */
if (qp->ext_type == EQPT_LLQP)
return;
if (on_sq) {
list = &qp->send_cq->sqp_err_list;
node = &qp->sq_err_node;
} else {
list = &qp->recv_cq->rqp_err_list;
node = &qp->rq_err_node;
}
if (list_empty(node))
list_add_tail(node, list);
return;
}
static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
unsigned long flags;
spin_lock_irqsave(&cq->spinlock, flags);
if (!list_empty(node))
list_del_init(node);
spin_unlock_irqrestore(&cq->spinlock, flags);
}
static void reset_queue_map(struct ehca_queue_map *qmap)
{
int i;
qmap->tail = qmap->entries - 1;
qmap->left_to_poll = 0;
qmap->next_wqe_idx = 0;
for (i = 0; i < qmap->entries; i++) {
qmap->map[i].reported = 1;
qmap->map[i].cqe_req = 0;
}
}
/*
* Create an ib_qp struct that is either a QP or an SRQ, depending on
* the value of the is_srq parameter. If init_attr and srq_init_attr share
* fields, the value from init_attr is used.
*/
static struct ehca_qp *internal_create_qp(
struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata, int is_srq)
{
struct ehca_qp *my_qp, *my_srq = NULL;
struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
struct ib_ucontext *context = NULL;
u64 h_ret;
int is_llqp = 0, has_srq = 0, is_user = 0;
int qp_type, max_send_sge, max_recv_sge, ret;
/* h_call's out parameters */
struct ehca_alloc_qp_parms parms;
u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
unsigned long flags;
if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
ehca_err(pd->device, "Unable to create QP, max number of %i "
"QPs reached.", shca->max_num_qps);
ehca_err(pd->device, "To increase the maximum number of QPs "
"use the number_of_qps module parameter.\n");
return ERR_PTR(-ENOSPC);
}
if (init_attr->create_flags) {
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
memset(&parms, 0, sizeof(parms));
qp_type = init_attr->qp_type;
if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
init_attr->sq_sig_type);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
/* save LLQP info */
if (qp_type & 0x80) {
is_llqp = 1;
parms.ext_type = EQPT_LLQP;
parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
}
qp_type &= 0x1F;
init_attr->qp_type &= 0x1F;
/* handle SRQ base QPs */
if (init_attr->srq) {
my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
if (qp_type == IB_QPT_UC) {
ehca_err(pd->device, "UC with SRQ not supported");
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
has_srq = 1;
parms.ext_type = EQPT_SRQBASE;
parms.srq_qpn = my_srq->real_qp_num;
}
if (is_llqp && has_srq) {
ehca_err(pd->device, "LLQPs can't have an SRQ");
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
/* handle SRQs */
if (is_srq) {
parms.ext_type = EQPT_SRQ;
parms.srq_limit = srq_init_attr->attr.srq_limit;
if (init_attr->cap.max_recv_sge > 3) {
ehca_err(pd->device, "no more than three SGEs "
"supported for SRQ pd=%p max_sge=%x",
pd, init_attr->cap.max_recv_sge);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
}
/* check QP type */
if (qp_type != IB_QPT_UD &&
qp_type != IB_QPT_UC &&
qp_type != IB_QPT_RC &&
qp_type != IB_QPT_SMI &&
qp_type != IB_QPT_GSI) {
ehca_err(pd->device, "wrong QP Type=%x", qp_type);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
if (is_llqp) {
switch (qp_type) {
case IB_QPT_RC:
if ((init_attr->cap.max_send_wr > 255) ||
(init_attr->cap.max_recv_wr > 255)) {
ehca_err(pd->device,
"Invalid Number of max_sq_wr=%x "
"or max_rq_wr=%x for RC LLQP",
init_attr->cap.max_send_wr,
init_attr->cap.max_recv_wr);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
break;
case IB_QPT_UD:
if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
ehca_err(pd->device, "UD LLQP not supported "
"by this adapter");
atomic_dec(&shca->num_qps);
return ERR_PTR(-ENOSYS);
}
if (!(init_attr->cap.max_send_sge <= 5
&& init_attr->cap.max_send_sge >= 1
&& init_attr->cap.max_recv_sge <= 5
&& init_attr->cap.max_recv_sge >= 1)) {
ehca_err(pd->device,
"Invalid Number of max_send_sge=%x "
"or max_recv_sge=%x for UD LLQP",
init_attr->cap.max_send_sge,
init_attr->cap.max_recv_sge);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
} else if (init_attr->cap.max_send_wr > 255) {
ehca_err(pd->device,
"Invalid Number of "
"max_send_wr=%x for UD QP_TYPE=%x",
init_attr->cap.max_send_wr, qp_type);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
break;
default:
ehca_err(pd->device, "unsupported LL QP Type=%x",
qp_type);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
} else {
int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
|| qp_type == IB_QPT_GSI) ? 250 : 252;
if (init_attr->cap.max_send_sge > max_sge
|| init_attr->cap.max_recv_sge > max_sge) {
ehca_err(pd->device, "Invalid number of SGEs requested "
"send_sge=%x recv_sge=%x max_sge=%x",
init_attr->cap.max_send_sge,
init_attr->cap.max_recv_sge, max_sge);
atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
}
my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
if (!my_qp) {
ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
atomic_dec(&shca->num_qps);
return ERR_PTR(-ENOMEM);
}
if (pd->uobject && udata) {
is_user = 1;
context = pd->uobject->context;
}
atomic_set(&my_qp->nr_events, 0);
init_waitqueue_head(&my_qp->wait_completion);
spin_lock_init(&my_qp->spinlock_s);
spin_lock_init(&my_qp->spinlock_r);
my_qp->qp_type = qp_type;
my_qp->ext_type = parms.ext_type;
my_qp->state = IB_QPS_RESET;
if (init_attr->recv_cq)
my_qp->recv_cq =
container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
if (init_attr->send_cq)
my_qp->send_cq =
container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
do {
if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
ret = -ENOMEM;
ehca_err(pd->device, "Can't reserve idr resources.");
goto create_qp_exit0;
}
write_lock_irqsave(&ehca_qp_idr_lock, flags);
ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
} while (ret == -EAGAIN);
if (ret) {
ret = -ENOMEM;
ehca_err(pd->device, "Can't allocate new idr entry.");
goto create_qp_exit0;
}
if (my_qp->token > 0x1FFFFFF) {
ret = -EINVAL;
ehca_err(pd->device, "Invalid number of qp");
goto create_qp_exit1;
}
if (has_srq)
parms.srq_token = my_qp->token;
parms.servicetype = ibqptype2servicetype(qp_type);
if (parms.servicetype < 0) {
ret = -EINVAL;
ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
goto create_qp_exit1;
}
/* Always signal by WQE so we can hide circ. WQEs */
parms.sigtype = HCALL_SIGT_BY_WQE;
/* UD_AV CIRCUMVENTION */
max_send_sge = init_attr->cap.max_send_sge;
max_recv_sge = init_attr->cap.max_recv_sge;
if (parms.servicetype == ST_UD && !is_llqp) {
max_send_sge += 2;
max_recv_sge += 2;
}
parms.token = my_qp->token;
parms.eq_handle = shca->eq.ipz_eq_handle;
parms.pd = my_pd->fw_pd;
if (my_qp->send_cq)
parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
if (my_qp->recv_cq)
parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
parms.squeue.max_wr = init_attr->cap.max_send_wr;
parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
parms.squeue.max_sge = max_send_sge;
parms.rqueue.max_sge = max_recv_sge;
/* RC QPs need one more SWQE for unsolicited ack circumvention */
if (qp_type == IB_QPT_RC)
parms.squeue.max_wr++;
if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
if (HAS_SQ(my_qp))
ehca_determine_small_queue(
&parms.squeue, max_send_sge, is_llqp);
if (HAS_RQ(my_qp))
ehca_determine_small_queue(
&parms.rqueue, max_recv_sge, is_llqp);
parms.qp_storage =
(parms.squeue.is_small || parms.rqueue.is_small);
}
h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
if (h_ret != H_SUCCESS) {
ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
h_ret);
ret = ehca2ib_return_code(h_ret);
goto create_qp_exit1;
}
ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
my_qp->ipz_qp_handle = parms.qp_handle;
my_qp->galpas = parms.galpas;
swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
switch (qp_type) {
case IB_QPT_RC:
if (is_llqp) {
parms.squeue.act_nr_sges = 1;
parms.rqueue.act_nr_sges = 1;
}
/* hide the extra WQE */
parms.squeue.act_nr_wqes--;
break;
case IB_QPT_UD:
case IB_QPT_GSI:
case IB_QPT_SMI:
/* UD circumvention */
if (is_llqp) {
parms.squeue.act_nr_sges = 1;
parms.rqueue.act_nr_sges = 1;
} else {
parms.squeue.act_nr_sges -= 2;
parms.rqueue.act_nr_sges -= 2;
}
if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
}
break;
default:
break;
}
/* initialize r/squeue and register queue pages */
if (HAS_SQ(my_qp)) {
ret = init_qp_queue(
shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
&parms.squeue, swqe_size);
if (ret) {
ehca_err(pd->device, "Couldn't initialize squeue "
"and pages ret=%i", ret);
goto create_qp_exit2;
}
if (!is_user) {
my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
my_qp->ipz_squeue.qe_size;
my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
sizeof(struct ehca_qmap_entry));
if (!my_qp->sq_map.map) {
ret = -ENOMEM;
ehca_err(pd->device, "Couldn't allocate squeue "
"map ret=%i", ret);
goto create_qp_exit3;
}
INIT_LIST_HEAD(&my_qp->sq_err_node);
/* to avoid the generation of bogus flush CQEs */
reset_queue_map(&my_qp->sq_map);
}
}
if (HAS_RQ(my_qp)) {
ret = init_qp_queue(
shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
H_SUCCESS, &parms.rqueue, rwqe_size);
if (ret) {
ehca_err(pd->device, "Couldn't initialize rqueue "
"and pages ret=%i", ret);
goto create_qp_exit4;
}
if (!is_user) {
my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
my_qp->ipz_rqueue.qe_size;
my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
sizeof(struct ehca_qmap_entry));
if (!my_qp->rq_map.map) {
ret = -ENOMEM;
ehca_err(pd->device, "Couldn't allocate rqueue "
"map ret=%i", ret);
goto create_qp_exit5;
}
INIT_LIST_HEAD(&my_qp->rq_err_node);
/* to avoid the generation of bogus flush CQEs */
reset_queue_map(&my_qp->rq_map);
}
} else if (init_attr->srq && !is_user) {
/* this is a base QP, use the queue map of the SRQ */
my_qp->rq_map = my_srq->rq_map;
INIT_LIST_HEAD(&my_qp->rq_err_node);
my_qp->ipz_rqueue = my_srq->ipz_rqueue;
}
if (is_srq) {
my_qp->ib_srq.pd = &my_pd->ib_pd;
my_qp->ib_srq.device = my_pd->ib_pd.device;
my_qp->ib_srq.srq_context = init_attr->qp_context;
my_qp->ib_srq.event_handler = init_attr->event_handler;
} else {
my_qp->ib_qp.qp_num = ib_qp_num;
my_qp->ib_qp.pd = &my_pd->ib_pd;
my_qp->ib_qp.device = my_pd->ib_pd.device;
my_qp->ib_qp.recv_cq = init_attr->recv_cq;
my_qp->ib_qp.send_cq = init_attr->send_cq;
my_qp->ib_qp.qp_type = qp_type;
my_qp->ib_qp.srq = init_attr->srq;
my_qp->ib_qp.qp_context = init_attr->qp_context;
my_qp->ib_qp.event_handler = init_attr->event_handler;
}
init_attr->cap.max_inline_data = 0; /* not supported yet */
init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
my_qp->init_attr = *init_attr;
if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
&my_qp->ib_qp;
if (ehca_nr_ports < 0) {
/* alloc array to cache subsequent modify qp parms
* for autodetect mode
*/
my_qp->mod_qp_parm =
kzalloc(EHCA_MOD_QP_PARM_MAX *
sizeof(*my_qp->mod_qp_parm),
GFP_KERNEL);
if (!my_qp->mod_qp_parm) {
ret = -ENOMEM;
ehca_err(pd->device,
"Could not alloc mod_qp_parm");
goto create_qp_exit5;
}
}
}
/* NOTE: define_aqp0() not supported yet */
if (qp_type == IB_QPT_GSI) {
h_ret = ehca_define_sqp(shca, my_qp, init_attr);
if (h_ret != H_SUCCESS) {
kfree(my_qp->mod_qp_parm);
my_qp->mod_qp_parm = NULL;
/* the QP pointer is no longer valid */
shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
NULL;
ret = ehca2ib_return_code(h_ret);
goto create_qp_exit6;
}
}
if (my_qp->send_cq) {
ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
if (ret) {
ehca_err(pd->device,
"Couldn't assign qp to send_cq ret=%i", ret);
goto create_qp_exit7;
}
}
/* copy queues, galpa data to user space */
if (context && udata) {
struct ehca_create_qp_resp resp;
memset(&resp, 0, sizeof(resp));
resp.qp_num = my_qp->real_qp_num;
resp.token = my_qp->token;
resp.qp_type = my_qp->qp_type;
resp.ext_type = my_qp->ext_type;
resp.qkey = my_qp->qkey;
resp.real_qp_num = my_qp->real_qp_num;
if (HAS_SQ(my_qp))
queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
if (HAS_RQ(my_qp))
queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
resp.fw_handle_ofs = (u32)
(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
ehca_err(pd->device, "Copy to udata failed");
ret = -EINVAL;
goto create_qp_exit8;
}
}
return my_qp;
create_qp_exit8:
ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
create_qp_exit7:
kfree(my_qp->mod_qp_parm);
create_qp_exit6:
if (HAS_RQ(my_qp) && !is_user)
vfree(my_qp->rq_map.map);
create_qp_exit5:
if (HAS_RQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
create_qp_exit4:
if (HAS_SQ(my_qp) && !is_user)
vfree(my_qp->sq_map.map);
create_qp_exit3:
if (HAS_SQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
create_qp_exit2:
hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
create_qp_exit1:
write_lock_irqsave(&ehca_qp_idr_lock, flags);
idr_remove(&ehca_qp_idr, my_qp->token);
write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
create_qp_exit0:
kmem_cache_free(qp_cache, my_qp);
atomic_dec(&shca->num_qps);
return ERR_PTR(ret);
}
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata)
{
struct ehca_qp *ret;
ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
struct ib_uobject *uobject);
struct ib_srq *ehca_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{
struct ib_qp_init_attr qp_init_attr;
struct ehca_qp *my_qp;
struct ib_srq *ret;
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
struct hcp_modify_qp_control_block *mqpcb;
u64 hret, update_mask;
if (srq_init_attr->srq_type != IB_SRQT_BASIC)
return ERR_PTR(-ENOSYS);
/* For common attributes, internal_create_qp() takes its info
* out of qp_init_attr, so copy all common attrs there.
*/
memset(&qp_init_attr, 0, sizeof(qp_init_attr));
qp_init_attr.event_handler = srq_init_attr->event_handler;
qp_init_attr.qp_context = srq_init_attr->srq_context;
qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
qp_init_attr.qp_type = IB_QPT_RC;
qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
if (IS_ERR(my_qp))
return (struct ib_srq *)my_qp;
/* copy back return values */
srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
srq_init_attr->attr.max_sge = 3;
/* drive SRQ into RTR state */
mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!mqpcb) {
ehca_err(pd->device, "Could not get zeroed page for mqpcb "
"ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
ret = ERR_PTR(-ENOMEM);
goto create_srq1;
}
mqpcb->qp_state = EHCA_QPS_INIT;
mqpcb->prim_phys_port = 1;
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
hret = hipz_h_modify_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
update_mask,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not modify SRQ to INIT "
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
mqpcb->qp_enable = 1;
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
hret = hipz_h_modify_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
update_mask,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not enable SRQ "
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
mqpcb->qp_state = EHCA_QPS_RTR;
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
hret = hipz_h_modify_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
update_mask,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not modify SRQ to RTR "
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
ehca_free_fw_ctrlblock(mqpcb);
return &my_qp->ib_srq;
create_srq2:
ret = ERR_PTR(ehca2ib_return_code(hret));
ehca_free_fw_ctrlblock(mqpcb);
create_srq1:
internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
return ret;
}
/*
* prepare_sqe_rts is called by internal_modify_qp() at the sqe -> rts transition.
* It sets the purge bit of the bad wqe and of all subsequent wqes to avoid
* re-entering sqe, and returns the total number of bad wqes in bad_wqe_cnt.
*/
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
int *bad_wqe_cnt)
{
u64 h_ret;
struct ipz_queue *squeue;
void *bad_send_wqe_p, *bad_send_wqe_v;
u64 q_ofs;
struct ehca_wqe *wqe;
int qp_num = my_qp->ib_qp.qp_num;
/* get send wqe pointer */
h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
my_qp->ipz_qp_handle, &my_qp->pf,
&bad_send_wqe_p, NULL, 2);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
" ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
qp_num, bad_send_wqe_p);
/* convert wqe pointer to vadr */
bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
if (ehca_debug_level >= 2)
ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
squeue = &my_qp->ipz_squeue;
if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
" bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
return -EFAULT;
}
/* loop sets wqe's purge bit */
wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) {
if (ehca_debug_level >= 2)
ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */
q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = (*bad_wqe_cnt)+1;
}
/*
* the bad wqe will be reprocessed and ignored when poll_cq() is called,
* i.e. the number of wqes with flush error status is one less
*/
ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
qp_num, (*bad_wqe_cnt)-1);
wqe->wqef = 0;
return 0;
}
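/*
* Added commentary on the purge loop above: for kernel QPs,
* internal_modify_qp() marks the next free WQE with optype = wqef = 0xff
* before calling prepare_sqe_rts(), so the loop stops at that sentinel after
* flagging every pending WQE with WQEF_PURGE; the sentinel's wqef is cleared
* again at the end of the function.
*/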
static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
struct ehca_queue_map *qmap)
{
void *wqe_v;
u64 q_ofs;
u32 wqe_idx;
unsigned int tail_idx;
/* convert real to abs address */
wqe_p = wqe_p & (~(1UL << 63));
wqe_v = abs_to_virt(wqe_p);
if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
ehca_gen_err("Invalid offset for calculating left cqes "
"wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
return -EFAULT;
}
tail_idx = next_index(qmap->tail, qmap->entries);
wqe_idx = q_ofs / ipz_queue->qe_size;
/* check all processed wqes, whether a cqe is requested or not */
while (tail_idx != wqe_idx) {
if (qmap->map[tail_idx].cqe_req)
qmap->left_to_poll++;
tail_idx = next_index(tail_idx, qmap->entries);
}
/* save index in queue, where we have to start flushing */
qmap->next_wqe_idx = wqe_idx;
return 0;
}
static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
u64 h_ret;
void *send_wqe_p, *recv_wqe_p;
int ret;
unsigned long flags;
int qp_num = my_qp->ib_qp.qp_num;
/* this hcall is not supported on base QPs */
if (my_qp->ext_type != EQPT_SRQBASE) {
/* get send and receive wqe pointer */
h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
my_qp->ipz_qp_handle, &my_qp->pf,
&send_wqe_p, &recv_wqe_p, 4);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "disable_and_get_wqe() "
"failed ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
/*
* acquire the lock to ensure that nobody is polling the cq;
* otherwise the qmap->tail pointer could be in an
* inconsistent state.
*/
spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
&my_qp->sq_map);
spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
if (ret)
return ret;
spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
&my_qp->rq_map);
spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
if (ret)
return ret;
} else {
spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
my_qp->sq_map.left_to_poll = 0;
my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
my_qp->sq_map.entries);
spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
my_qp->rq_map.left_to_poll = 0;
my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
my_qp->rq_map.entries);
spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
}
/* this ensures flush cqes are generated only for pending wqes */
if ((my_qp->sq_map.left_to_poll == 0) &&
(my_qp->rq_map.left_to_poll == 0)) {
spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
ehca_add_to_err_list(my_qp, 1);
spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
if (HAS_RQ(my_qp)) {
spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
ehca_add_to_err_list(my_qp, 0);
spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
flags);
}
}
return 0;
}
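/*
* Added commentary: left_to_poll counts completions that have not been polled
* yet for WQEs the hardware already processed. The QP is added to the CQs'
* error lists here only when both counters are zero, so that flush CQEs are
* generated solely for still-pending WQEs; otherwise the move to the error
* list is expected to happen once the outstanding completions have been
* polled.
*/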
/*
* internal_modify_qp with circumvention to handle aqp0 properly
* smi_reset2init indicates if this is an internal reset-to-init call for
* smi. This flag must always be zero if called from ehca_modify_qp()!
* This internal function was introduced to avoid recursion of ehca_modify_qp()!
*/
static int internal_modify_qp(struct ib_qp *ibqp,
struct ib_qp_attr *attr,
int attr_mask, int smi_reset2init)
{
enum ib_qp_state qp_cur_state, qp_new_state;
int cnt, qp_attr_idx, ret = 0;
enum ib_qp_statetrans statetrans;
struct hcp_modify_qp_control_block *mqpcb;
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
struct ehca_shca *shca =
container_of(ibqp->pd->device, struct ehca_shca, ib_device);
u64 update_mask;
u64 h_ret;
int bad_wqe_cnt = 0;
int is_user = 0;
int squeue_locked = 0;
unsigned long flags = 0;
/* do query_qp to obtain current attr values */
mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
if (!mqpcb) {
ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
"ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
return -ENOMEM;
}
h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
mqpcb, my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ehca_err(ibqp->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, ibqp->qp_num, h_ret);
ret = ehca2ib_return_code(h_ret);
goto modify_qp_exit1;
}
if (ibqp->uobject)
is_user = 1;
qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
if (qp_cur_state == -EINVAL) { /* invalid qp state */
ret = -EINVAL;
ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
"ehca_qp=%p qp_num=%x",
mqpcb->qp_state, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
}
/*
* circumvention to set aqp0 initial state to init
* as expected by IB spec
*/
if (smi_reset2init == 0 &&
ibqp->qp_type == IB_QPT_SMI &&
qp_cur_state == IB_QPS_RESET &&
(attr_mask & IB_QP_STATE) &&
attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
struct ib_qp_attr smiqp_attr = {
.qp_state = IB_QPS_INIT,
.port_num = my_qp->init_attr.port_num,
.pkey_index = 0,
.qkey = 0
};
int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
IB_QP_PKEY_INDEX | IB_QP_QKEY;
int smirc = internal_modify_qp(
ibqp, &smiqp_attr, smiqp_attr_mask, 1);
if (smirc) {
ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
"ehca_modify_qp() rc=%i", smirc);
ret = H_PARAMETER;
goto modify_qp_exit1;
}
qp_cur_state = IB_QPS_INIT;
ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
}
/* check whether the transmitted current state equals the "real" current state */
if ((attr_mask & IB_QP_CUR_STATE) &&
qp_cur_state != attr->cur_qp_state) {
ret = -EINVAL;
ehca_err(ibqp->device,
"Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
" actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
}
ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
"new qp_state=%x attribute_mask=%x",
my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
if (!smi_reset2init &&
!ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
attr_mask)) {
ret = -EINVAL;
ehca_err(ibqp->device,
"Invalid qp transition new_state=%x cur_state=%x "
"ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
goto modify_qp_exit1;
}
mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
if (mqpcb->qp_state)
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
else {
ret = -EINVAL;
ehca_err(ibqp->device, "Invalid new qp state=%x "
"ehca_qp=%p qp_num=%x",
qp_new_state, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
}
/* retrieve state transition struct to get req and opt attrs */
statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
if (statetrans < 0) {
ret = -EINVAL;
ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
"new_qp_state=%x State_xsition=%x ehca_qp=%p "
"qp_num=%x", qp_cur_state, qp_new_state,
statetrans, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
}
qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
if (qp_attr_idx < 0) {
ret = qp_attr_idx;
ehca_err(ibqp->device,
"Invalid QP type=%x ehca_qp=%p qp_num=%x",
ibqp->qp_type, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
}
ehca_dbg(ibqp->device,
"ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
my_qp, ibqp->qp_num, statetrans);
/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
* in non-LL UD QPs.
*/
if ((my_qp->qp_type == IB_QPT_UD) &&
(my_qp->ext_type != EQPT_LLQP) &&
(statetrans == IB_QPST_INIT2RTR) &&
(shca->hw_level >= 0x22)) {
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
mqpcb->send_grh_flag = 1;
}
/* sqe -> rts: set purge bit of bad wqe before the actual transition */
if ((my_qp->qp_type == IB_QPT_UD ||
my_qp->qp_type == IB_QPT_GSI ||
my_qp->qp_type == IB_QPT_SMI) &&
statetrans == IB_QPST_SQE2RTS) {
/* mark next free wqe if kernel */
if (!ibqp->uobject) {
struct ehca_wqe *wqe;
/* lock send queue */
spin_lock_irqsave(&my_qp->spinlock_s, flags);
squeue_locked = 1;
/* mark next free wqe */
wqe = (struct ehca_wqe *)
ipz_qeit_get(&my_qp->ipz_squeue);
wqe->optype = wqe->wqef = 0xff;
ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
ibqp->qp_num, wqe);
}
ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
if (ret) {
ehca_err(ibqp->device, "prepare_sqe_rts() failed "
"ehca_qp=%p qp_num=%x ret=%i",
my_qp, ibqp->qp_num, ret);
goto modify_qp_exit2;
}
}
/*
* enable RDMA_Atomic_Control if reset->init and reliable connection;
* this is necessary since gen2 does not provide that flag,
* but pHyp requires it
*/
if (statetrans == IB_QPST_RESET2INIT &&
(ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
mqpcb->rdma_atomic_ctrl = 3;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
}
/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
if (statetrans == IB_QPST_INIT2RTR &&
(ibqp->qp_type == IB_QPT_UC) &&
!(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
}
if (attr_mask & IB_QP_PKEY_INDEX) {
if (attr->pkey_index >= 16) {
ret = -EINVAL;
ehca_err(ibqp->device, "Invalid pkey_index=%x. "
"ehca_qp=%p qp_num=%x max_pkey_index=f",
attr->pkey_index, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
mqpcb->prim_p_key_idx = attr->pkey_index;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
}
if (attr_mask & IB_QP_PORT) {
struct ehca_sport *sport;
struct ehca_qp *aqp1;
if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
ret = -EINVAL;
ehca_err(ibqp->device, "Invalid port=%x. "
"ehca_qp=%p qp_num=%x num_ports=%x",
attr->port_num, my_qp, ibqp->qp_num,
shca->num_ports);
goto modify_qp_exit2;
}
sport = &shca->sport[attr->port_num - 1];
if (!sport->ibqp_sqp[IB_QPT_GSI]) {
/* should not occur */
ret = -EFAULT;
ehca_err(ibqp->device, "AQP1 was not created for "
"port=%x", attr->port_num);
goto modify_qp_exit2;
}
aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
struct ehca_qp, ib_qp);
if (ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_SMI &&
aqp1->mod_qp_parm) {
/*
* firmware will reject this modify_qp() because
* port is not activated/initialized fully
*/
ret = -EFAULT;
ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
"either port is being activated (try again) "
"or cabling issue", attr->port_num);
goto modify_qp_exit2;
}
mqpcb->prim_phys_port = attr->port_num;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
}
if (attr_mask & IB_QP_QKEY) {
mqpcb->qkey = attr->qkey;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
}
if (attr_mask & IB_QP_AV) {
mqpcb->dlid = attr->ah_attr.dlid;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
mqpcb->service_level = attr->ah_attr.sl;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
attr->ah_attr.static_rate,
&mqpcb->max_static_rate)) {
ret = -EINVAL;
goto modify_qp_exit2;
}
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
/*
* Always supply the GRH flag, even if it's zero, to give the
* hypervisor a clear "yes" or "no" instead of a "perhaps"
*/
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
/*
* only if GRH is TRUE may we set SOURCE_GID_IDX
* and DEST_GID; otherwise pHyp will return H_ATTR_PARM!!!
*/
if (attr->ah_attr.ah_flags == IB_AH_GRH) {
mqpcb->send_grh_flag = 1;
mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
for (cnt = 0; cnt < 16; cnt++)
mqpcb->dest_gid.byte[cnt] =
attr->ah_attr.grh.dgid.raw[cnt];
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
mqpcb->flow_label = attr->ah_attr.grh.flow_label;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
}
}
if (attr_mask & IB_QP_PATH_MTU) {
/* store log2(MTU) */
my_qp->mtu_shift = attr->path_mtu + 7;
mqpcb->path_mtu = attr->path_mtu;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
}
if (attr_mask & IB_QP_TIMEOUT) {
mqpcb->timeout = attr->timeout;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
}
if (attr_mask & IB_QP_RETRY_CNT) {
mqpcb->retry_count = attr->retry_cnt;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
}
if (attr_mask & IB_QP_RNR_RETRY) {
mqpcb->rnr_retry_count = attr->rnr_retry;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
}
if (attr_mask & IB_QP_RQ_PSN) {
mqpcb->receive_psn = attr->rq_psn;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
attr->max_dest_rd_atomic : 2;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
attr->max_rd_atomic : 2;
update_mask |=
EHCA_BMASK_SET
(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
}
if (attr_mask & IB_QP_ALT_PATH) {
if (attr->alt_port_num < 1
|| attr->alt_port_num > shca->num_ports) {
ret = -EINVAL;
ehca_err(ibqp->device, "Invalid alt_port=%x. "
"ehca_qp=%p qp_num=%x num_ports=%x",
attr->alt_port_num, my_qp, ibqp->qp_num,
shca->num_ports);
goto modify_qp_exit2;
}
mqpcb->alt_phys_port = attr->alt_port_num;
if (attr->alt_pkey_index >= 16) {
ret = -EINVAL;
ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
"ehca_qp=%p qp_num=%x max_pkey_index=f",
attr->pkey_index, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
mqpcb->alt_p_key_idx = attr->alt_pkey_index;
mqpcb->timeout_al = attr->alt_timeout;
mqpcb->dlid_al = attr->alt_ah_attr.dlid;
mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
mqpcb->service_level_al = attr->alt_ah_attr.sl;
if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
attr->alt_ah_attr.static_rate,
&mqpcb->max_static_rate_al)) {
ret = -EINVAL;
goto modify_qp_exit2;
}
/* OpenIB doesn't support alternate retry counts - copy them */
mqpcb->retry_count_al = mqpcb->retry_count;
mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
/*
* Always supply the GRH flag, even if it's zero, to give the
* hypervisor a clear "yes" or "no" instead of a "perhaps"
*/
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
/*
* only if GRH is TRUE may we set SOURCE_GID_IDX
* and DEST_GID; otherwise pHyp will return H_ATTR_PARM!!!
*/
if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
mqpcb->send_grh_flag_al = 1;
for (cnt = 0; cnt < 16; cnt++)
mqpcb->dest_gid_al.byte[cnt] =
attr->alt_ah_attr.grh.dgid.raw[cnt];
mqpcb->source_gid_idx_al =
attr->alt_ah_attr.grh.sgid_index;
mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
mqpcb->traffic_class_al =
attr->alt_ah_attr.grh.traffic_class;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
}
}
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
}
if (attr_mask & IB_QP_SQ_PSN) {
mqpcb->send_psn = attr->sq_psn;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
}
if (attr_mask & IB_QP_DEST_QPN) {
mqpcb->dest_qp_nr = attr->dest_qp_num;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
}
if (attr_mask & IB_QP_PATH_MIG_STATE) {
if (attr->path_mig_state != IB_MIG_REARM
&& attr->path_mig_state != IB_MIG_MIGRATED) {
ret = -EINVAL;
ehca_err(ibqp->device, "Invalid mig_state=%x",
attr->path_mig_state);
goto modify_qp_exit2;
}
mqpcb->path_migration_state = attr->path_mig_state + 1;
if (attr->path_mig_state == IB_MIG_REARM)
my_qp->mig_armed = 1;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
}
if (attr_mask & IB_QP_CAP) {
mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
/* no support for max_send/recv_sge yet */
}
if (ehca_debug_level >= 2)
ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
update_mask,
mqpcb, my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
"ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
if ((my_qp->qp_type == IB_QPT_UD ||
my_qp->qp_type == IB_QPT_GSI ||
my_qp->qp_type == IB_QPT_SMI) &&
statetrans == IB_QPST_SQE2RTS) {
/* ring the doorbell to reprocess the wqes */
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, bad_wqe_cnt-1);
ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
}
if (statetrans == IB_QPST_RESET2INIT ||
statetrans == IB_QPST_INIT2INIT) {
mqpcb->qp_enable = 1;
mqpcb->qp_state = EHCA_QPS_INIT;
update_mask = 0;
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
update_mask,
mqpcb,
my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(ibqp->device, "ENABLE in context of "
"RESET_2_INIT failed! Maybe you didn't get "
"a LID h_ret=%lli ehca_qp=%p qp_num=%x",
h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
}
if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
&& !is_user) {
ret = check_for_left_cqes(my_qp, shca);
if (ret)
goto modify_qp_exit2;
}
if (statetrans == IB_QPST_ANY2RESET) {
ipz_qeit_reset(&my_qp->ipz_rqueue);
ipz_qeit_reset(&my_qp->ipz_squeue);
if (qp_cur_state == IB_QPS_ERR && !is_user) {
del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
if (HAS_RQ(my_qp))
del_from_err_list(my_qp->recv_cq,
&my_qp->rq_err_node);
}
if (!is_user)
reset_queue_map(&my_qp->sq_map);
if (HAS_RQ(my_qp) && !is_user)
reset_queue_map(&my_qp->rq_map);
}
if (attr_mask & IB_QP_QKEY)
my_qp->qkey = attr->qkey;
modify_qp_exit2:
if (squeue_locked) { /* this means: sqe -> rts */
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
my_qp->sqerr_purgeflag = 1;
}
modify_qp_exit1:
ehca_free_fw_ctrlblock(mqpcb);
return ret;
}
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
int ret = 0;
struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
ib_device);
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
/* The if-block below caches qp_attr to be modified for GSI and SMI
* qps during the initialization by ib_mad. When the respective port
* is activated, i.e. we got a PORT_ACTIVE event, we'll replay the
* cached sequence of modify calls, see ehca_recover_sqp() below.
* Why that is required:
* 1) If only one port is connected, older code required it to be port
* one and the module option nr_ports=1 to be given by the user,
* which is very inconvenient for the end user.
* 2) Firmware accepts modify_qp() only once the respective port has
* become active. Older code had a 30 sec wait loop in create_qp()/
* define_aqp1(), which is not appropriate in practice. This
* code now removes that wait loop, see define_aqp1(), and always
* reports all ports to ib_mad and to users. Only activated ports
* will then be usable by users.
*/
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
int port = my_qp->init_attr.port_num;
struct ehca_sport *sport = &shca->sport[port - 1];
unsigned long flags;
spin_lock_irqsave(&sport->mod_sqp_lock, flags);
/* cache qp_attr only during init */
if (my_qp->mod_qp_parm) {
struct ehca_mod_qp_parm *p;
if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
ehca_err(&shca->ib_device,
"mod_qp_parm overflow state=%x port=%x"
" type=%x", attr->qp_state,
my_qp->init_attr.port_num,
ibqp->qp_type);
spin_unlock_irqrestore(&sport->mod_sqp_lock,
flags);
return -EINVAL;
}
p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
p->mask = attr_mask;
p->attr = *attr;
my_qp->mod_qp_parm_idx++;
ehca_dbg(&shca->ib_device,
"Saved qp_attr for state=%x port=%x type=%x",
attr->qp_state, my_qp->init_attr.port_num,
ibqp->qp_type);
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
goto out;
}
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
}
ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
out:
if ((ret == 0) && (attr_mask & IB_QP_STATE))
my_qp->state = attr->qp_state;
return ret;
}
void ehca_recover_sqp(struct ib_qp *sqp)
{
struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
int port = my_sqp->init_attr.port_num;
struct ib_qp_attr attr;
struct ehca_mod_qp_parm *qp_parm;
int i, qp_parm_idx, ret;
unsigned long flags, wr_cnt;
if (!my_sqp->mod_qp_parm)
return;
ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
qp_parm = my_sqp->mod_qp_parm;
qp_parm_idx = my_sqp->mod_qp_parm_idx;
for (i = 0; i < qp_parm_idx; i++) {
attr = qp_parm[i].attr;
ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
if (ret) {
ehca_err(sqp->device, "Could not modify SQP port=%x "
"qp_num=%x ret=%x", port, sqp->qp_num, ret);
goto free_qp_parm;
}
ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
port, sqp->qp_num, attr.qp_state);
}
/* re-trigger posted recv wrs */
wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
my_sqp->ipz_rqueue.qe_size;
if (wr_cnt) {
spin_lock_irqsave(&my_sqp->spinlock_r, flags);
hipz_update_rqa(my_sqp, wr_cnt);
spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
port, sqp->qp_num, wr_cnt);
}
free_qp_parm:
kfree(qp_parm);
/* this prevents subsequent calls to modify_qp() from caching qp_attr */
my_sqp->mod_qp_parm = NULL;
}
int ehca_query_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
ib_device);
struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
struct hcp_modify_qp_control_block *qpcb;
int cnt, ret = 0;
u64 h_ret;
if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
ehca_err(qp->device, "Invalid attribute mask "
"ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
my_qp, qp->qp_num, qp_attr_mask);
return -EINVAL;
}
qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!qpcb) {
ehca_err(qp->device, "Out of memory for qpcb "
"ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
return -ENOMEM;
}
h_ret = hipz_h_query_qp(adapter_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
qpcb, my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(qp->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, qp->qp_num, h_ret);
goto query_qp_exit1;
}
qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
qp_attr->qp_state = qp_attr->cur_qp_state;
if (qp_attr->cur_qp_state == -EINVAL) {
ret = -EINVAL;
ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
"ehca_qp=%p qp_num=%x",
qpcb->qp_state, my_qp, qp->qp_num);
goto query_qp_exit1;
}
if (qp_attr->qp_state == IB_QPS_SQD)
qp_attr->sq_draining = 1;
qp_attr->qkey = qpcb->qkey;
qp_attr->path_mtu = qpcb->path_mtu;
qp_attr->path_mig_state = qpcb->path_migration_state - 1;
qp_attr->rq_psn = qpcb->receive_psn;
qp_attr->sq_psn = qpcb->send_psn;
qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
/* UD_AV CIRCUMVENTION */
if (my_qp->qp_type == IB_QPT_UD) {
qp_attr->cap.max_send_sge =
qpcb->actual_nr_sges_in_sq_wqe - 2;
qp_attr->cap.max_recv_sge =
qpcb->actual_nr_sges_in_rq_wqe - 2;
} else {
qp_attr->cap.max_send_sge =
qpcb->actual_nr_sges_in_sq_wqe;
qp_attr->cap.max_recv_sge =
qpcb->actual_nr_sges_in_rq_wqe;
}
qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
qp_attr->dest_qp_num = qpcb->dest_qp_nr;
qp_attr->pkey_index = qpcb->prim_p_key_idx;
qp_attr->port_num = qpcb->prim_phys_port;
qp_attr->timeout = qpcb->timeout;
qp_attr->retry_cnt = qpcb->retry_count;
qp_attr->rnr_retry = qpcb->rnr_retry_count;
qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
qp_attr->alt_port_num = qpcb->alt_phys_port;
qp_attr->alt_timeout = qpcb->timeout_al;
qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
/* primary av */
qp_attr->ah_attr.sl = qpcb->service_level;
if (qpcb->send_grh_flag) {
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
}
qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
qp_attr->ah_attr.dlid = qpcb->dlid;
qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
qp_attr->ah_attr.port_num = qp_attr->port_num;
/* primary GRH */
qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
for (cnt = 0; cnt < 16; cnt++)
qp_attr->ah_attr.grh.dgid.raw[cnt] =
qpcb->dest_gid.byte[cnt];
/* alternate AV */
qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
if (qpcb->send_grh_flag_al) {
qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
}
qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
/* alternate GRH */
qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
for (cnt = 0; cnt < 16; cnt++)
qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
qpcb->dest_gid_al.byte[cnt];
/* return init attributes given in ehca_create_qp */
if (qp_init_attr)
*qp_init_attr = my_qp->init_attr;
if (ehca_debug_level >= 2)
ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
query_qp_exit1:
ehca_free_fw_ctrlblock(qpcb);
return ret;
}
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
struct ehca_qp *my_qp =
container_of(ibsrq, struct ehca_qp, ib_srq);
struct ehca_shca *shca =
container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
struct hcp_modify_qp_control_block *mqpcb;
u64 update_mask;
u64 h_ret;
int ret = 0;
mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!mqpcb) {
ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
"ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
return -ENOMEM;
}
update_mask = 0;
if (attr_mask & IB_SRQ_LIMIT) {
attr_mask &= ~IB_SRQ_LIMIT;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
mqpcb->curr_srq_limit = attr->srq_limit;
mqpcb->qp_aff_asyn_ev_log_reg =
EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
}
/* by now, all bits in attr_mask should have been cleared */
if (attr_mask) {
ehca_err(ibsrq->device, "invalid attribute mask bits set "
"attr_mask=%x", attr_mask);
ret = -EINVAL;
goto modify_srq_exit0;
}
if (ehca_debug_level >= 2)
ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
NULL, update_mask, mqpcb,
my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
"ehca_qp=%p qp_num=%x",
h_ret, my_qp, my_qp->real_qp_num);
}
modify_srq_exit0:
ehca_free_fw_ctrlblock(mqpcb);
return ret;
}
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
ib_device);
struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
struct hcp_modify_qp_control_block *qpcb;
int ret = 0;
u64 h_ret;
qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!qpcb) {
ehca_err(srq->device, "Out of memory for qpcb "
"ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
return -ENOMEM;
}
h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
NULL, qpcb, my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(srq->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, h_ret);
goto query_srq_exit1;
}
srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
srq_attr->max_sge = 3;
srq_attr->srq_limit = qpcb->curr_srq_limit;
if (ehca_debug_level >= 2)
ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
query_srq_exit1:
ehca_free_fw_ctrlblock(qpcb);
return ret;
}
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
struct ib_uobject *uobject)
{
struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
ib_pd);
struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
u32 qp_num = my_qp->real_qp_num;
int ret;
u64 h_ret;
u8 port_num;
int is_user = 0;
enum ib_qp_type qp_type;
unsigned long flags;
if (uobject) {
is_user = 1;
if (my_qp->mm_count_galpa ||
my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
ehca_err(dev, "Resources still referenced in "
"user space qp_num=%x", qp_num);
return -EINVAL;
}
}
if (my_qp->send_cq) {
ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
if (ret) {
ehca_err(dev, "Couldn't unassign qp from "
"send_cq ret=%i qp_num=%x cq_num=%x", ret,
qp_num, my_qp->send_cq->cq_number);
return ret;
}
}
write_lock_irqsave(&ehca_qp_idr_lock, flags);
idr_remove(&ehca_qp_idr, my_qp->token);
write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
/*
* SRQs will never get into an error list and do not have a recv_cq,
* so we need to skip them here.
*/
if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
if (HAS_SQ(my_qp) && !is_user)
del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
/* now wait until all pending events have completed */
wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
"ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
return ehca2ib_return_code(h_ret);
}
port_num = my_qp->init_attr.port_num;
qp_type = my_qp->init_attr.qp_type;
if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
spin_lock_irqsave(&sport->mod_sqp_lock, flags);
kfree(my_qp->mod_qp_parm);
my_qp->mod_qp_parm = NULL;
shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
}
/* no support for IB_QPT_SMI yet */
if (qp_type == IB_QPT_GSI) {
struct ib_event event;
ehca_info(dev, "device %s: port %x is inactive.",
shca->ib_device.name, port_num);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ERR;
event.element.port_num = port_num;
shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
ib_dispatch_event(&event);
}
if (HAS_RQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
if (!is_user)
vfree(my_qp->rq_map.map);
}
if (HAS_SQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
if (!is_user)
vfree(my_qp->sq_map.map);
}
kmem_cache_free(qp_cache, my_qp);
atomic_dec(&shca->num_qps);
return 0;
}
int ehca_destroy_qp(struct ib_qp *qp)
{
return internal_destroy_qp(qp->device,
container_of(qp, struct ehca_qp, ib_qp),
qp->uobject);
}
int ehca_destroy_srq(struct ib_srq *srq)
{
return internal_destroy_qp(srq->device,
container_of(srq, struct ehca_qp, ib_srq),
srq->uobject);
}
int ehca_init_qp_cache(void)
{
qp_cache = kmem_cache_create("ehca_cache_qp",
sizeof(struct ehca_qp), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!qp_cache)
return -ENOMEM;
return 0;
}
void ehca_cleanup_qp_cache(void)
{
if (qp_cache)
kmem_cache_destroy(qp_cache);
}
| gpl-2.0 |
cannondalev2000/kernel_lge_msm8974 | arch/mips/emma/markeins/platform.c | 9290 | 5027 | /*
* Copyright(C) MontaVista Software Inc, 2006
*
* Author: dmitry pervushin <dpervushin@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/serial_8250.h>
#include <linux/mtd/physmap.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
#include <asm/time.h>
#include <asm/bcache.h>
#include <asm/irq.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/emma/emma2rh.h>
#define I2C_EMMA2RH "emma2rh-iic" /* must be in sync with IIC driver */
static struct resource i2c_emma_resources_0[] = {
{
.name = NULL,
.start = EMMA2RH_IRQ_PIIC0,
.end = EMMA2RH_IRQ_PIIC0,
.flags = IORESOURCE_IRQ
}, {
.name = NULL,
.start = EMMA2RH_PIIC0_BASE,
.end = EMMA2RH_PIIC0_BASE + 0x1000,
.flags = 0
},
};
struct resource i2c_emma_resources_1[] = {
{
.name = NULL,
.start = EMMA2RH_IRQ_PIIC1,
.end = EMMA2RH_IRQ_PIIC1,
.flags = IORESOURCE_IRQ
}, {
.name = NULL,
.start = EMMA2RH_PIIC1_BASE,
.end = EMMA2RH_PIIC1_BASE + 0x1000,
.flags = 0
},
};
struct resource i2c_emma_resources_2[] = {
{
.name = NULL,
.start = EMMA2RH_IRQ_PIIC2,
.end = EMMA2RH_IRQ_PIIC2,
.flags = IORESOURCE_IRQ
}, {
.name = NULL,
.start = EMMA2RH_PIIC2_BASE,
.end = EMMA2RH_PIIC2_BASE + 0x1000,
.flags = 0
},
};
struct platform_device i2c_emma_devices[] = {
[0] = {
.name = I2C_EMMA2RH,
.id = 0,
.resource = i2c_emma_resources_0,
.num_resources = ARRAY_SIZE(i2c_emma_resources_0),
},
[1] = {
.name = I2C_EMMA2RH,
.id = 1,
.resource = i2c_emma_resources_1,
.num_resources = ARRAY_SIZE(i2c_emma_resources_1),
},
[2] = {
.name = I2C_EMMA2RH,
.id = 2,
.resource = i2c_emma_resources_2,
.num_resources = ARRAY_SIZE(i2c_emma_resources_2),
},
};
#define EMMA2RH_SERIAL_CLOCK 18544000
#define EMMA2RH_SERIAL_FLAGS UPF_BOOT_AUTOCONF | UPF_SKIP_TEST
static struct plat_serial8250_port platform_serial_ports[] = {
[0] = {
.membase= (void __iomem*)KSEG1ADDR(EMMA2RH_PFUR0_BASE + 3),
.mapbase = EMMA2RH_PFUR0_BASE + 3,
.irq = EMMA2RH_IRQ_PFUR0,
.uartclk = EMMA2RH_SERIAL_CLOCK,
.regshift = 4,
.iotype = UPIO_MEM,
.flags = EMMA2RH_SERIAL_FLAGS,
}, [1] = {
.membase = (void __iomem*)KSEG1ADDR(EMMA2RH_PFUR1_BASE + 3),
.mapbase = EMMA2RH_PFUR1_BASE + 3,
.irq = EMMA2RH_IRQ_PFUR1,
.uartclk = EMMA2RH_SERIAL_CLOCK,
.regshift = 4,
.iotype = UPIO_MEM,
.flags = EMMA2RH_SERIAL_FLAGS,
}, [2] = {
.membase = (void __iomem*)KSEG1ADDR(EMMA2RH_PFUR2_BASE + 3),
.mapbase = EMMA2RH_PFUR2_BASE + 3,
.irq = EMMA2RH_IRQ_PFUR2,
.uartclk = EMMA2RH_SERIAL_CLOCK,
.regshift = 4,
.iotype = UPIO_MEM,
.flags = EMMA2RH_SERIAL_FLAGS,
}, [3] = {
.flags = 0,
},
};
static struct platform_device serial_emma = {
.name = "serial8250",
.dev = {
.platform_data = &platform_serial_ports,
},
};
static struct mtd_partition markeins_parts[] = {
[0] = {
.name = "RootFS",
.offset = 0x00000000,
.size = 0x00c00000,
},
[1] = {
.name = "boot code area",
.offset = MTDPART_OFS_APPEND,
.size = 0x00100000,
},
[2] = {
.name = "kernel image",
.offset = MTDPART_OFS_APPEND,
.size = 0x00300000,
},
[3] = {
.name = "RootFS2",
.offset = MTDPART_OFS_APPEND,
.size = 0x00c00000,
},
[4] = {
.name = "boot code area2",
.offset = MTDPART_OFS_APPEND,
.size = 0x00100000,
},
[5] = {
.name = "kernel image2",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data markeins_flash_data = {
.width = 2,
.nr_parts = ARRAY_SIZE(markeins_parts),
.parts = markeins_parts
};
static struct resource markeins_flash_resource = {
.start = 0x1e000000,
.end = 0x02000000,
.flags = IORESOURCE_MEM
};
static struct platform_device markeins_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &markeins_flash_data,
},
.num_resources = 1,
.resource = &markeins_flash_resource,
};
static struct platform_device *devices[] = {
i2c_emma_devices,
i2c_emma_devices + 1,
i2c_emma_devices + 2,
&serial_emma,
&markeins_flash_device,
};
static int __init platform_devices_setup(void)
{
return platform_add_devices(devices, ARRAY_SIZE(devices));
}
arch_initcall(platform_devices_setup);
| gpl-2.0 |
SlimRoms/kernel_samsung_tuna | drivers/net/atl1e/atl1e_param.c | 9290 | 7333 | /*
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/netdevice.h>
#include "atl1e.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define ATL1E_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET }
#define ATL1E_PARAM(x, desc) \
static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
static unsigned int num_##x; \
module_param_array_named(x, x, int, &num_##x, 0); \
MODULE_PARM_DESC(x, desc);
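/*
 * Illustrative expansion (not part of the driver, shown only to make the
 * macro above concrete): ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor
 * count") would expand to roughly
 *
 *   static int __devinitdata tx_desc_cnt[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT;
 *   static unsigned int num_tx_desc_cnt;
 *   module_param_array_named(tx_desc_cnt, tx_desc_cnt, int, &num_tx_desc_cnt, 0);
 *   MODULE_PARM_DESC(tx_desc_cnt, "Transmit descriptor count");
 *
 * giving one per-board slot plus a spare, all initialized to OPTION_UNSET.
 */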
/* Transmit Descriptor Count
*
* Valid Range: 32-1020
*
* Default Value: 128
*/
#define ATL1E_MIN_TX_DESC_CNT 32
#define ATL1E_MAX_TX_DESC_CNT 1020
#define ATL1E_DEFAULT_TX_DESC_CNT 128
ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor count");
/* Receive Memory Block Count
*
* Valid Range: 8-1024 (KB)
*
* Default Value: 256 (KB)
*/
#define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */
#define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */
#define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 256KB */
ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)");
/* User Specified MediaType Override
*
* Valid Range: 0-4
* - 0 - auto-negotiate at all supported speeds
* - 1 - only link at 100Mbps Full Duplex
* - 2 - only link at 100Mbps Half Duplex
* - 3 - only link at 10Mbps Full Duplex
* - 4 - only link at 10Mbps Half Duplex
* Default Value: 0
*/
ATL1E_PARAM(media_type, "MediaType Select");
/* Interrupt Moderate Timer in units of 2 us
*
* Valid Range: 50-65000
*
* Default Value: 100 (200us)
*/
#define INT_MOD_DEFAULT_CNT 100 /* 200us */
#define INT_MOD_MAX_CNT 65000
#define INT_MOD_MIN_CNT 50
ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer");
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
#define FLASH_VENDOR_DEFAULT 0
#define FLASH_VENDOR_MIN 0
#define FLASH_VENDOR_MAX 2
struct atl1e_option {
enum { enable_option, range_option, list_option } type;
char *name;
char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
struct atl1e_opt_list { int i; char *str; } *p;
} l;
} arg;
};
static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
netdev_info(adapter->netdev,
"%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
netdev_info(adapter->netdev,
"%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
netdev_info(adapter->netdev, "%s set to %i\n",
opt->name, *value);
return 0;
}
break;
case list_option:{
int i;
struct atl1e_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
netdev_info(adapter->netdev,
"%s\n", ent->str);
return 0;
}
}
break;
}
default:
BUG();
}
netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
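/*
 * Illustrative invocation (an assumption about typical usage, not taken from
 * the original file): per-board values are passed as comma-separated module
 * parameters, e.g.
 *
 *   modprobe atl1e tx_desc_cnt=256,512 int_mod_timer=100
 *
 * which would apply 256/512 descriptors to boards 0/1 and leave every other
 * option at its default.
 */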
/*
* atl1e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= ATL1E_MAX_NIC) {
netdev_notice(adapter->netdev,
"no configuration for board #%i\n", bd);
netdev_notice(adapter->netdev,
"Using defaults for all values\n");
}
{ /* Transmit Ring Size */
struct atl1e_option opt = {
.type = range_option,
.name = "Transmit Ddescription Count",
.err = "using default of "
__MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT),
.def = ATL1E_DEFAULT_TX_DESC_CNT,
.arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT,
.max = ATL1E_MAX_TX_DESC_CNT} }
};
int val;
if (num_tx_desc_cnt > bd) {
val = tx_desc_cnt[bd];
atl1e_validate_option(&val, &opt, adapter);
adapter->tx_ring.count = (u16) val & 0xFFFC;
} else
adapter->tx_ring.count = (u16)opt.def;
}
{ /* Receive Memory Block Count */
struct atl1e_option opt = {
.type = range_option,
.name = "Memory size of rx buffer(KB)",
.err = "using default of "
__MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE),
.def = ATL1E_DEFAULT_RX_MEM_SIZE,
.arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE,
.max = ATL1E_MAX_RX_MEM_SIZE} }
};
int val;
if (num_rx_mem_size > bd) {
val = rx_mem_size[bd];
atl1e_validate_option(&val, &opt, adapter);
adapter->rx_ring.page_size = (u32)val * 1024;
} else {
adapter->rx_ring.page_size = (u32)opt.def * 1024;
}
}
{ /* Interrupt Moderate Timer */
struct atl1e_option opt = {
.type = range_option,
.name = "Interrupt Moderate Timer",
.err = "using default of "
__MODULE_STRING(INT_MOD_DEFAULT_CNT),
.def = INT_MOD_DEFAULT_CNT,
.arg = { .r = { .min = INT_MOD_MIN_CNT,
.max = INT_MOD_MAX_CNT} }
} ;
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
atl1e_validate_option(&val, &opt, adapter);
adapter->hw.imt = (u16) val;
} else
adapter->hw.imt = (u16)(opt.def);
}
{ /* MediaType */
struct atl1e_option opt = {
.type = range_option,
.name = "Speed/Duplex Selection",
.err = "using default of "
__MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR),
.def = MEDIA_TYPE_AUTO_SENSOR,
.arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR,
.max = MEDIA_TYPE_10M_HALF} }
} ;
int val;
if (num_media_type > bd) {
val = media_type[bd];
atl1e_validate_option(&val, &opt, adapter);
adapter->hw.media_type = (u16) val;
} else
adapter->hw.media_type = (u16)(opt.def);
}
}
| gpl-2.0 |
kuba160/tf300t-kernel | arch/arm/nwfpe/fpa11_cprt.c | 14666 | 9249 | /*
NetWinder Floating Point Emulator
(c) Rebel.COM, 1998,1999
(c) Philip Blundell, 1999, 2001
Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "fpa11.h"
#include "fpopcode.h"
#include "fpa11.inl"
#include "fpmodule.h"
#include "fpmodule.inl"
#include "softfloat.h"
unsigned int PerformFLT(const unsigned int opcode);
unsigned int PerformFIX(const unsigned int opcode);
static unsigned int PerformComparison(const unsigned int opcode);
unsigned int EmulateCPRT(const unsigned int opcode)
{
if (opcode & 0x800000) {
/* This is some variant of a comparison (PerformComparison
will sort out which one). Since most of the other CPRT
instructions are oddball cases of some sort or other it
makes sense to pull this out into a fast path. */
return PerformComparison(opcode);
}
/* Hint to GCC that we'd like a jump table rather than a load of CMPs */
switch ((opcode & 0x700000) >> 20) {
case FLT_CODE >> 20:
return PerformFLT(opcode);
break;
case FIX_CODE >> 20:
return PerformFIX(opcode);
break;
case WFS_CODE >> 20:
writeFPSR(readRegister(getRd(opcode)));
break;
case RFS_CODE >> 20:
writeRegister(getRd(opcode), readFPSR());
break;
default:
return 0;
}
return 1;
}
unsigned int PerformFLT(const unsigned int opcode)
{
FPA11 *fpa11 = GET_FPA11();
struct roundingData roundData;
roundData.mode = SetRoundingMode(opcode);
roundData.precision = SetRoundingPrecision(opcode);
roundData.exception = 0;
switch (opcode & MASK_ROUNDING_PRECISION) {
case ROUND_SINGLE:
{
fpa11->fType[getFn(opcode)] = typeSingle;
fpa11->fpreg[getFn(opcode)].fSingle = int32_to_float32(&roundData, readRegister(getRd(opcode)));
}
break;
case ROUND_DOUBLE:
{
fpa11->fType[getFn(opcode)] = typeDouble;
fpa11->fpreg[getFn(opcode)].fDouble = int32_to_float64(readRegister(getRd(opcode)));
}
break;
#ifdef CONFIG_FPE_NWFPE_XP
case ROUND_EXTENDED:
{
fpa11->fType[getFn(opcode)] = typeExtended;
fpa11->fpreg[getFn(opcode)].fExtended = int32_to_floatx80(readRegister(getRd(opcode)));
}
break;
#endif
default:
return 0;
}
if (roundData.exception)
float_raise(roundData.exception);
return 1;
}
unsigned int PerformFIX(const unsigned int opcode)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int Fn = getFm(opcode); /* the FIX source register is encoded in the Fm field */
struct roundingData roundData;
roundData.mode = SetRoundingMode(opcode);
roundData.precision = SetRoundingPrecision(opcode);
roundData.exception = 0;
switch (fpa11->fType[Fn]) {
case typeSingle:
{
writeRegister(getRd(opcode), float32_to_int32(&roundData, fpa11->fpreg[Fn].fSingle));
}
break;
case typeDouble:
{
writeRegister(getRd(opcode), float64_to_int32(&roundData, fpa11->fpreg[Fn].fDouble));
}
break;
#ifdef CONFIG_FPE_NWFPE_XP
case typeExtended:
{
writeRegister(getRd(opcode), floatx80_to_int32(&roundData, fpa11->fpreg[Fn].fExtended));
}
break;
#endif
default:
return 0;
}
if (roundData.exception)
float_raise(roundData.exception);
return 1;
}
/* This instruction sets the flags N, Z, C, V in the FPSR. */
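/*
 * A sketch of the resulting flag settings, summarized from the code below
 * (the mapping is inferred from this implementation, not quoted from the
 * FPA data sheet):
 *
 *   Fn <  Fm   -> N set
 *   Fn == Fm   -> Z set
 *   Fn >  Fm   -> C set (labelled "greater than or equal" below)
 *   unordered  -> V set, N and Z cleared, C copied from the FPSR AC bit,
 *                 and the invalid-operation exception raised for the
 *                 CxFE (e_flag) variants
 *
 * The CNxx (n_flag) forms negate Fm by flipping its sign bit first.
 */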
static unsigned int PerformComparison(const unsigned int opcode)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int Fn = getFn(opcode), Fm = getFm(opcode);
int e_flag = opcode & 0x400000; /* 1 if CxFE */
int n_flag = opcode & 0x200000; /* 1 if CNxx */
unsigned int flags = 0;
#ifdef CONFIG_FPE_NWFPE_XP
floatx80 rFn, rFm;
/* Check for unordered condition and convert all operands to 80-bit
format.
?? Might be some mileage in avoiding this conversion if possible.
Eg, if both operands are 32-bit, detect this and do a 32-bit
comparison (cheaper than an 80-bit one). */
switch (fpa11->fType[Fn]) {
case typeSingle:
//printk("single.\n");
if (float32_is_nan(fpa11->fpreg[Fn].fSingle))
goto unordered;
rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle);
break;
case typeDouble:
//printk("double.\n");
if (float64_is_nan(fpa11->fpreg[Fn].fDouble))
goto unordered;
rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble);
break;
case typeExtended:
//printk("extended.\n");
if (floatx80_is_nan(fpa11->fpreg[Fn].fExtended))
goto unordered;
rFn = fpa11->fpreg[Fn].fExtended;
break;
default:
return 0;
}
if (CONSTANT_FM(opcode)) {
//printk("Fm is a constant: #%d.\n",Fm);
rFm = getExtendedConstant(Fm);
if (floatx80_is_nan(rFm))
goto unordered;
} else {
//printk("Fm = r%d which contains a ",Fm);
switch (fpa11->fType[Fm]) {
case typeSingle:
//printk("single.\n");
if (float32_is_nan(fpa11->fpreg[Fm].fSingle))
goto unordered;
rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle);
break;
case typeDouble:
//printk("double.\n");
if (float64_is_nan(fpa11->fpreg[Fm].fDouble))
goto unordered;
rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble);
break;
case typeExtended:
//printk("extended.\n");
if (floatx80_is_nan(fpa11->fpreg[Fm].fExtended))
goto unordered;
rFm = fpa11->fpreg[Fm].fExtended;
break;
default:
return 0;
}
}
if (n_flag)
rFm.high ^= 0x8000;
/* test for less than condition */
if (floatx80_lt(rFn, rFm))
flags |= CC_NEGATIVE;
/* test for equal condition */
if (floatx80_eq(rFn, rFm))
flags |= CC_ZERO;
/* test for greater than or equal condition */
if (floatx80_lt(rFm, rFn))
flags |= CC_CARRY;
#else
if (CONSTANT_FM(opcode)) {
/* Fm is a constant. Do the comparison in whatever precision
Fn happens to be stored in. */
if (fpa11->fType[Fn] == typeSingle) {
float32 rFm = getSingleConstant(Fm);
float32 rFn = fpa11->fpreg[Fn].fSingle;
if (float32_is_nan(rFn))
goto unordered;
if (n_flag)
rFm ^= 0x80000000;
/* test for less than condition */
if (float32_lt_nocheck(rFn, rFm))
flags |= CC_NEGATIVE;
/* test for equal condition */
if (float32_eq_nocheck(rFn, rFm))
flags |= CC_ZERO;
/* test for greater than or equal condition */
if (float32_lt_nocheck(rFm, rFn))
flags |= CC_CARRY;
} else {
float64 rFm = getDoubleConstant(Fm);
float64 rFn = fpa11->fpreg[Fn].fDouble;
if (float64_is_nan(rFn))
goto unordered;
if (n_flag)
rFm ^= 0x8000000000000000ULL;
/* test for less than condition */
if (float64_lt_nocheck(rFn, rFm))
flags |= CC_NEGATIVE;
/* test for equal condition */
if (float64_eq_nocheck(rFn, rFm))
flags |= CC_ZERO;
/* test for greater than or equal condition */
if (float64_lt_nocheck(rFm, rFn))
flags |= CC_CARRY;
}
} else {
/* Both operands are in registers. */
if (fpa11->fType[Fn] == typeSingle
&& fpa11->fType[Fm] == typeSingle) {
float32 rFm = fpa11->fpreg[Fm].fSingle;
float32 rFn = fpa11->fpreg[Fn].fSingle;
if (float32_is_nan(rFn)
|| float32_is_nan(rFm))
goto unordered;
if (n_flag)
rFm ^= 0x80000000;
/* test for less than condition */
if (float32_lt_nocheck(rFn, rFm))
flags |= CC_NEGATIVE;
/* test for equal condition */
if (float32_eq_nocheck(rFn, rFm))
flags |= CC_ZERO;
/* test for greater than or equal condition */
if (float32_lt_nocheck(rFm, rFn))
flags |= CC_CARRY;
} else {
/* Promote 32-bit operand to 64 bits. */
float64 rFm, rFn;
rFm = (fpa11->fType[Fm] == typeSingle) ?
float32_to_float64(fpa11->fpreg[Fm].fSingle)
: fpa11->fpreg[Fm].fDouble;
rFn = (fpa11->fType[Fn] == typeSingle) ?
float32_to_float64(fpa11->fpreg[Fn].fSingle)
: fpa11->fpreg[Fn].fDouble;
if (float64_is_nan(rFn)
|| float64_is_nan(rFm))
goto unordered;
if (n_flag)
rFm ^= 0x8000000000000000ULL;
/* test for less than condition */
if (float64_lt_nocheck(rFn, rFm))
flags |= CC_NEGATIVE;
/* test for equal condition */
if (float64_eq_nocheck(rFn, rFm))
flags |= CC_ZERO;
/* test for greater than or equal condition */
if (float64_lt_nocheck(rFm, rFn))
flags |= CC_CARRY;
}
}
#endif
writeConditionCodes(flags);
return 1;
unordered:
/* ?? The FPA data sheet is pretty vague about this, in particular
about whether the non-E comparisons can ever raise exceptions.
This implementation is based on a combination of what it says in
the data sheet, observation of how the Acorn emulator actually
behaves (and how programs expect it to) and guesswork. */
flags |= CC_OVERFLOW;
flags &= ~(CC_ZERO | CC_NEGATIVE);
if (BIT_AC & readFPSR())
flags |= CC_CARRY;
if (e_flag)
float_raise(float_flag_invalid);
writeConditionCodes(flags);
return 1;
}
| gpl-2.0 |
akopytov/percona-xtradb-cluster | tests/showdb_test.c | 75 | 1770 | /* Copyright (c) 2000, 2003, 2004 MySQL AB
Use is subject to license terms
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#ifdef __WIN__
#include <windows.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include "mysql.h"
#define SELECT_QUERY "select name from test where num = %d"
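/*
 * Rough usage sketch (an assumption based on the argument handling below,
 * not documented in the original file): the test connects to <dbname> on
 * the local server and calls mysql_list_dbs() <num> times, printing the
 * number of databases returned on each iteration, e.g.
 *
 *   ./showdb_test test 10
 */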
int main(int argc, char **argv)
{
int count, num;
MYSQL mysql,*sock;
MYSQL_RES *res;
char qbuf[160];
if (argc != 3)
{
fprintf(stderr,"usage : select_test <dbname> <num>\n\n");
exit(1);
}
mysql_init(&mysql);
if (!(sock = mysql_real_connect(&mysql,NULL,0,0,argv[1],0,NULL,0)))
{
fprintf(stderr,"Couldn't connect to engine!\n%s\n\n",mysql_error(&mysql));
perror("");
exit(1);
}
mysql.reconnect= 1;
count = 0;
num = atoi(argv[2]);
while (count < num)
{
sprintf(qbuf,SELECT_QUERY,count);
if(!(res=mysql_list_dbs(sock,NULL)))
{
fprintf(stderr,"Query failed (%s)\n",mysql_error(sock));
exit(1);
}
printf("number of fields: %d\n",mysql_num_rows(res));
mysql_free_result(res);
count++;
}
mysql_close(sock);
exit(0);
return 0; /* Keep some compilers happy */
}
| gpl-2.0 |
gokulnatha/GT-I9505 | drivers/mmc/core/sdio.c | 75 | 29771 | /*
* linux/drivers/mmc/sdio.c
*
* Copyright 2006-2007 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include "core.h"
#include "bus.h"
#include "sd.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
#include "sdio_cis.h"
#ifdef CONFIG_MMC_EMBEDDED_SDIO
#include <linux/mmc/sdio_ids.h>
#endif
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
unsigned char data;
if (mmc_card_nonstd_func_interface(func->card)) {
func->class = SDIO_CLASS_NONE;
return 0;
}
ret = mmc_io_rw_direct(func->card, 0, 0,
SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
if (ret)
goto out;
data &= 0x0f;
if (data == 0x0f) {
ret = mmc_io_rw_direct(func->card, 0, 0,
SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF_EXT, 0, &data);
if (ret)
goto out;
}
func->class = data;
out:
return ret;
}
static int sdio_init_func(struct mmc_card *card, unsigned int fn)
{
int ret;
struct sdio_func *func;
BUG_ON(fn > SDIO_MAX_FUNCS);
func = sdio_alloc_func(card);
if (IS_ERR(func))
return PTR_ERR(func);
func->num = fn;
if (!(card->quirks & MMC_QUIRK_NONSTD_SDIO)) {
ret = sdio_read_fbr(func);
if (ret)
goto fail;
ret = sdio_read_func_cis(func);
if (ret)
goto fail;
} else {
func->vendor = func->card->cis.vendor;
func->device = func->card->cis.device;
func->max_blksize = func->card->cis.blksize;
}
card->sdio_func[fn - 1] = func;
return 0;
fail:
/*
* It is okay to remove the function here even though we hold
* the host lock as we haven't registered the device yet.
*/
sdio_remove_func(func);
return ret;
}
static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
{
int ret;
int cccr_vsn;
int uhs = ocr & R4_18V_PRESENT;
unsigned char data;
unsigned char speed;
memset(&card->cccr, 0, sizeof(struct sdio_cccr));
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CCCR, 0, &data);
if (ret)
goto out;
cccr_vsn = data & 0x0f;
if (cccr_vsn > SDIO_CCCR_REV_3_00) {
pr_err("%s: unrecognised CCCR structure version %d\n",
mmc_hostname(card->host), cccr_vsn);
return -EINVAL;
}
card->cccr.sdio_vsn = (data & 0xf0) >> 4;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CAPS, 0, &data);
if (ret)
goto out;
if (data & SDIO_CCCR_CAP_SMB)
card->cccr.multi_block = 1;
if (data & SDIO_CCCR_CAP_LSC)
card->cccr.low_speed = 1;
if (data & SDIO_CCCR_CAP_4BLS)
card->cccr.wide_bus = 1;
if (cccr_vsn >= SDIO_CCCR_REV_1_10) {
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_POWER, 0, &data);
if (ret)
goto out;
if (data & SDIO_POWER_SMPC)
card->cccr.high_power = 1;
}
if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
goto out;
card->scr.sda_spec3 = 0;
card->sw_caps.sd3_bus_mode = 0;
card->sw_caps.sd3_drv_type = 0;
if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
card->scr.sda_spec3 = 1;
ret = mmc_io_rw_direct(card, 0, 0,
SDIO_CCCR_UHS, 0, &data);
if (ret)
goto out;
if (mmc_host_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50;
if (data & SDIO_UHS_SDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_SDR50;
if (data & SDIO_UHS_SDR104)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_SDR104;
}
ret = mmc_io_rw_direct(card, 0, 0,
SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
if (ret)
goto out;
if (data & SDIO_DRIVE_SDTA)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
if (data & SDIO_DRIVE_SDTC)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
}
/* if no uhs mode ensure we check for high speed */
if (!card->sw_caps.sd3_bus_mode) {
if (speed & SDIO_SPEED_SHS) {
card->cccr.high_speed = 1;
card->sw_caps.hs_max_dtr = 50000000;
} else {
card->cccr.high_speed = 0;
card->sw_caps.hs_max_dtr = 25000000;
}
}
}
out:
return ret;
}
static int sdio_enable_wide(struct mmc_card *card)
{
int ret;
u8 ctrl;
if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
if (ret)
return ret;
if (card->host->caps & MMC_CAP_8_BIT_DATA)
ctrl |= SDIO_BUS_WIDTH_8BIT;
else if (card->host->caps & MMC_CAP_4_BIT_DATA)
ctrl |= SDIO_BUS_WIDTH_4BIT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
if (ret)
return ret;
return 1;
}
/*
* If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1)
* of the card. This may be required on certain setups of boards,
* controllers and embedded sdio device which do not need the card's
* pull-up. As a result, card detection is disabled and power is saved.
*/
static int sdio_disable_cd(struct mmc_card *card)
{
int ret;
u8 ctrl;
if (!mmc_card_disable_cd(card))
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
if (ret)
return ret;
ctrl |= SDIO_BUS_CD_DISABLE;
return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
}
/*
* Devices that remain active during a system suspend are
* put back into 1-bit mode.
*/
static int sdio_disable_wide(struct mmc_card *card)
{
int ret;
u8 ctrl;
if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
if (ret)
return ret;
if (!(ctrl & (SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT)))
return 0;
ctrl &= ~(SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT);
ctrl |= SDIO_BUS_ASYNC_INT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
if (ret)
return ret;
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);
return 0;
}
static int sdio_enable_4bit_bus(struct mmc_card *card)
{
int err;
if (card->type == MMC_TYPE_SDIO)
return sdio_enable_wide(card);
if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
if (err)
return err;
} else
return 0;
err = sdio_enable_wide(card);
if (err <= 0)
mmc_app_set_bus_width(card, MMC_BUS_WIDTH_1);
return err;
}
/*
* Test if the card supports high-speed mode and, if so, switch to it.
*/
static int mmc_sdio_switch_hs(struct mmc_card *card, int enable)
{
int ret;
u8 speed;
if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
return 0;
if (!card->cccr.high_speed)
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
return ret;
if (enable)
speed |= SDIO_SPEED_EHS;
else
speed &= ~SDIO_SPEED_EHS;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
if (ret)
return ret;
return 1;
}
/*
* Enable SDIO/combo card's high-speed mode. Returns 1 if high-speed was
* enabled, 0 if it is not supported, or a negative errno on failure.
*/
static int sdio_enable_hs(struct mmc_card *card)
{
int ret;
ret = mmc_sdio_switch_hs(card, true);
if (ret <= 0 || card->type == MMC_TYPE_SDIO)
return ret;
ret = mmc_sd_switch_hs(card);
if (ret <= 0)
mmc_sdio_switch_hs(card, false);
return ret;
}
static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
{
unsigned max_dtr;
if (mmc_card_highspeed(card)) {
/*
* The SDIO specification doesn't mention how
* the CIS transfer speed register relates to
* high-speed, but it seems that 50 MHz is
* mandatory.
*/
max_dtr = 50000000;
} else {
max_dtr = card->cis.max_dtr;
}
if (card->type == MMC_TYPE_SD_COMBO)
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
return max_dtr;
}
static unsigned char host_drive_to_sdio_drive(int host_strength)
{
switch (host_strength) {
case MMC_SET_DRIVER_TYPE_A:
return SDIO_DTSx_SET_TYPE_A;
case MMC_SET_DRIVER_TYPE_B:
return SDIO_DTSx_SET_TYPE_B;
case MMC_SET_DRIVER_TYPE_C:
return SDIO_DTSx_SET_TYPE_C;
case MMC_SET_DRIVER_TYPE_D:
return SDIO_DTSx_SET_TYPE_D;
default:
return SDIO_DTSx_SET_TYPE_B;
}
}
static void sdio_select_driver_type(struct mmc_card *card)
{
int host_drv_type = SD_DRIVER_TYPE_B;
int card_drv_type = SD_DRIVER_TYPE_B;
int drive_strength;
unsigned char card_strength;
int err;
/*
* If the host doesn't support any of the Driver Types A,C or D,
* or there is no board specific handler then default Driver
* Type B is used.
*/
if (!(card->host->caps &
(MMC_CAP_DRIVER_TYPE_A |
MMC_CAP_DRIVER_TYPE_C |
MMC_CAP_DRIVER_TYPE_D)))
return;
if (!card->host->ops->select_drive_strength)
return;
if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
host_drv_type |= SD_DRIVER_TYPE_A;
if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
host_drv_type |= SD_DRIVER_TYPE_C;
if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
host_drv_type |= SD_DRIVER_TYPE_D;
if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
card_drv_type |= SD_DRIVER_TYPE_A;
if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
card_drv_type |= SD_DRIVER_TYPE_C;
if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
card_drv_type |= SD_DRIVER_TYPE_D;
/*
* The drive strength that the hardware can support
* depends on the board design. Pass the appropriate
* information and let the hardware specific code
* return what is possible given the options
*/
drive_strength = card->host->ops->select_drive_strength(
card->sw_caps.uhs_max_dtr,
host_drv_type, card_drv_type);
/* if error just use default for drive strength B */
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
&card_strength);
if (err)
return;
card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
card_strength |= host_drive_to_sdio_drive(drive_strength);
err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
card_strength, NULL);
/* if error default to drive strength B */
if (!err)
mmc_set_driver_type(card->host, drive_strength);
}
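/*
 * A short note on the speed-mode selection below (a summary of this code,
 * not of the SDIO specification): the modes are checked in the order
 * SDR104, DDR50, SDR50, SDR25, SDR12, and the first one advertised by both
 * the host caps and the card's sd3_bus_mode wins, with SDR12 as the
 * fallback. The winning mode is written to SDIO_CCCR_SPEED and, for any
 * mode other than the SDR12 default, the host timing and clock are then
 * updated to match.
 */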
static int sdio_set_bus_speed_mode(struct mmc_card *card)
{
unsigned int bus_speed, timing;
int err;
unsigned char speed;
/*
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
if (!mmc_host_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
bus_speed = SDIO_SPEED_SDR104;
timing = MMC_TIMING_UHS_SDR104;
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
bus_speed = SDIO_SPEED_DDR50;
timing = MMC_TIMING_UHS_DDR50;
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
bus_speed = SDIO_SPEED_SDR50;
timing = MMC_TIMING_UHS_SDR50;
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
bus_speed = SDIO_SPEED_SDR25;
timing = MMC_TIMING_UHS_SDR25;
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (err)
return err;
speed &= ~SDIO_SPEED_BSS_MASK;
speed |= bus_speed;
err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
if (err)
return err;
if (bus_speed) {
mmc_set_timing(card->host, timing);
mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
}
return 0;
}
/*
* UHS-I specific initialization procedure
*/
static int mmc_sdio_init_uhs_card(struct mmc_card *card)
{
int err;
if (!card->scr.sda_spec3)
return 0;
/*
* Switch to wider bus (if supported).
*/
if (card->host->caps & MMC_CAP_4_BIT_DATA) {
err = sdio_enable_4bit_bus(card);
if (err > 0) {
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
err = 0;
}
}
/* Set the driver strength for the card */
sdio_select_driver_type(card);
/* Set bus speed mode of the card */
err = sdio_set_bus_speed_mode(card);
if (err)
goto out;
#if defined(CONFIG_BCM4335) || defined(CONFIG_BCM4335_MODULE)
/*
* Prevent the tuning operation when initializing a card
* for WiFi operation over sdmmc.
*/
if (!strcmp(mmc_hostname(card->host), "mmc1"))
printk("%s: remove initialize and start re-tuning timer"
"to prevent CMD53 request timeout \n", mmc_hostname(card->host));
else
#endif
/* Initialize and start re-tuning timer */
if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
err = card->host->ops->execute_tuning(card->host,
MMC_SEND_TUNING_BLOCK);
out:
return err;
}
/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
* we're trying to reinitialise.
*/
static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *oldcard, int powered_resume)
{
struct mmc_card *card;
int err;
BUG_ON(!host);
WARN_ON(!host->claimed);
#if defined(CONFIG_BCM4335) || defined(CONFIG_BCM4335_MODULE)
/* If host that supports UHS-I sets S18R to 1 in arg of CMD5 to request
* change of signaling level to 1.8V
*/
if (host->caps &
(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_DDR50))
host->ocr |= R4_18V_PRESENT;
#endif
/*
* Inform the card of the voltage
*/
if (!powered_resume) {
/* The initialization should be done at 3.3 V I/O voltage. */
mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
err = mmc_send_io_op_cond(host, host->ocr, &ocr);
if (err)
goto err;
}
/*
* For SPI, enable CRC as appropriate.
*/
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
if (err)
goto err;
}
/*
* Allocate card structure.
*/
card = mmc_alloc_card(host, NULL);
if (IS_ERR(card)) {
err = PTR_ERR(card);
goto err;
}
if ((ocr & R4_MEMORY_PRESENT) &&
mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
mmc_remove_card(card);
return -ENOENT;
}
} else {
card->type = MMC_TYPE_SDIO;
if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
mmc_remove_card(card);
return -ENOENT;
}
}
/*
* Call the optional HC's init_card function to handle quirks.
*/
if (host->ops->init_card) {
mmc_host_clk_hold(host);
host->ops->init_card(host, card);
mmc_host_clk_release(host);
}
/*
* If the host and card support UHS-I mode request the card
* to switch to 1.8V signaling level. No 1.8v signalling if
* UHS mode is not enabled to maintain compatibility and some
* systems that claim 1.8v signalling in fact do not support
* it.
*/
if ((ocr & R4_18V_PRESENT) && mmc_host_uhs(host)) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
true);
if (err) {
ocr &= ~R4_18V_PRESENT;
host->ocr &= ~R4_18V_PRESENT;
}
err = 0;
} else {
ocr &= ~R4_18V_PRESENT;
host->ocr &= ~R4_18V_PRESENT;
}
/*
* For native busses: set card RCA and quit open drain mode.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
err = mmc_send_relative_addr(host, &card->rca);
if (err)
goto remove;
/*
* Update oldcard with the new RCA received from the SDIO
* device -- we're doing this so that it's updated in the
* "card" struct when oldcard overwrites that later.
*/
if (oldcard)
oldcard->rca = card->rca;
}
/*
* Read CSD, before selecting the card
*/
if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_get_csd(host, card);
if (err)
return err;
mmc_decode_cid(card);
}
/*
* Select card, as all following commands rely on that.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
goto remove;
}
if (card->quirks & MMC_QUIRK_NONSTD_SDIO) {
/*
* This is non-standard SDIO device, meaning it doesn't
* have any CIA (Common I/O area) registers present.
* It's host's responsibility to fill cccr and cis
* structures in init_card().
*/
mmc_set_clock(host, card->cis.max_dtr);
if (card->cccr.high_speed) {
mmc_card_set_highspeed(card);
mmc_set_timing(card->host, MMC_TIMING_SD_HS);
}
goto finish;
}
#ifdef CONFIG_MMC_EMBEDDED_SDIO
if (host->embedded_sdio_data.cccr)
memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
else {
#endif
/*
* Read the common registers.
*/
err = sdio_read_cccr(card, ocr);
if (err)
goto remove;
#ifdef CONFIG_MMC_EMBEDDED_SDIO
}
#endif
#ifdef CONFIG_MMC_EMBEDDED_SDIO
if (host->embedded_sdio_data.cis)
memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
else {
#endif
/*
* Read the common CIS tuples.
*/
err = sdio_read_common_cis(card);
if (err)
goto remove;
#ifdef CONFIG_MMC_EMBEDDED_SDIO
}
#endif
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device);
mmc_remove_card(card);
if (!same)
return -ENOENT;
card = oldcard;
}
mmc_fixup_device(card, NULL);
if (card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
/* handle as SDIO-only card if memory init failed */
if (err) {
mmc_go_idle(host);
if (mmc_host_is_spi(host))
/* should not fail, as it worked previously */
mmc_spi_set_crc(host, use_spi_crc);
card->type = MMC_TYPE_SDIO;
} else
card->dev.type = &sd_type;
}
/*
* If needed, disconnect card detection pull-up resistor.
*/
err = sdio_disable_cd(card);
if (err)
goto remove;
/* Initialization sequence for UHS-I cards */
/* Only if card supports 1.8v and UHS signaling */
if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
err = mmc_sdio_init_uhs_card(card);
if (err)
goto remove;
/* Card is an ultra-high-speed card */
mmc_card_set_uhs(card);
} else {
/*
* Switch to high-speed (if supported).
*/
err = sdio_enable_hs(card);
if (err > 0)
mmc_sd_go_highspeed(card);
else if (err)
goto remove;
/*
* Change to the card's maximum speed.
*/
mmc_set_clock(host, mmc_sdio_get_max_clock(card));
/*
* Switch to wider bus (if supported).
*/
err = sdio_enable_4bit_bus(card);
if (err > 0) {
if (card->host->caps & MMC_CAP_8_BIT_DATA)
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
else if (card->host->caps & MMC_CAP_4_BIT_DATA)
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
} else if (err)
goto remove;
}
finish:
if (!oldcard)
host->card = card;
return 0;
remove:
if (!oldcard)
mmc_remove_card(card);
err:
return err;
}
/*
* Host is being removed. Free up the current card.
*/
static void mmc_sdio_remove(struct mmc_host *host)
{
int i;
BUG_ON(!host);
BUG_ON(!host->card);
for (i = 0;i < host->card->sdio_funcs;i++) {
if (host->card->sdio_func[i]) {
sdio_remove_func(host->card->sdio_func[i]);
host->card->sdio_func[i] = NULL;
}
}
mmc_remove_card(host->card);
host->card = NULL;
}
/*
* Card detection - card is alive.
*/
static int mmc_sdio_alive(struct mmc_host *host)
{
return mmc_select_card(host->card);
}
/*
* Card detection callback from host.
*/
static void mmc_sdio_detect(struct mmc_host *host)
{
int err;
BUG_ON(!host);
BUG_ON(!host->card);
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
err = pm_runtime_get_sync(&host->card->dev);
if (err < 0)
goto out;
}
mmc_claim_host(host);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
mmc_release_host(host);
/*
* Tell PM core it's OK to power off the card now.
*
* The _sync variant is used in order to ensure that the card
* is left powered off in case an error occurred, and the card
* is going to be removed.
*
* Since there is no specific reason to believe a new user
* is about to show up at this point, the _sync variant is
* desirable anyway.
*/
if (host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_sync(&host->card->dev);
out:
if (err) {
mmc_sdio_remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
}
}
/*
* SDIO suspend. We need to suspend all functions separately.
* Therefore all registered functions must have drivers with suspend
* and resume methods. Failing that we simply remove the whole card.
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
int i, err = 0;
for (i = 0; i < host->card->sdio_funcs; i++) {
struct sdio_func *func = host->card->sdio_func[i];
if (func && sdio_func_present(func) && func->dev.driver) {
const struct dev_pm_ops *pmops = func->dev.driver->pm;
if (!pmops || !pmops->suspend || !pmops->resume) {
/* force removal of entire card in that case */
err = -ENOSYS;
} else
err = pmops->suspend(&func->dev);
if (err)
break;
}
}
while (err && --i >= 0) {
struct sdio_func *func = host->card->sdio_func[i];
if (func && sdio_func_present(func) && func->dev.driver) {
const struct dev_pm_ops *pmops = func->dev.driver->pm;
pmops->resume(&func->dev);
}
}
if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
mmc_claim_host(host);
sdio_disable_wide(host->card);
mmc_release_host(host);
}
return err;
}
static int mmc_sdio_resume(struct mmc_host *host)
{
int i, err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
/* Basic card reinitialization. */
mmc_claim_host(host);
/* No need to reinitialize powered-resumed nonremovable cards */
if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
sdio_reset(host);
mmc_go_idle(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
if (err > 0) {
if (host->caps & MMC_CAP_8_BIT_DATA)
mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
else if (host->caps & MMC_CAP_4_BIT_DATA)
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
err = 0;
}
}
if (!err && host->sdio_irqs)
wake_up_process(host->sdio_irq_thread);
mmc_release_host(host);
/*
* If the card looked to be the same as before suspending, then
* we proceed to resume all card functions. If one of them returns
* an error then we simply return that error to the core and the
* card will be redetected as new. It is the responsibility of
* the function driver to perform further tests with the extra
* knowledge it has of the card to confirm the card is indeed the
* same as before suspending (same MAC address for network cards,
* etc.) and return an error otherwise.
*/
for (i = 0; !err && i < host->card->sdio_funcs; i++) {
struct sdio_func *func = host->card->sdio_func[i];
if (func && sdio_func_present(func) && func->dev.driver) {
const struct dev_pm_ops *pmops = func->dev.driver->pm;
err = pmops->resume(&func->dev);
}
}
return err;
}
static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
u32 ocr;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
/*
* Reset the card by performing the same steps that are taken by
* mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
*
* sdio_reset() is technically not needed. Having just powered up the
* hardware, it should already be in reset state. However, some
* platforms (such as SD8686 on OLPC) do not instantly cut power,
* meaning that a reset is required when restoring power soon after
* powering off. It is harmless in other cases.
*
* The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
* is not necessary for non-removable cards. However, it is required
* for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
* harmless in other situations.
*
* With these steps taken, mmc_select_voltage() is also required to
* restore the correct voltage setting of the card.
*/
/* The initialization should be done at 3.3 V I/O voltage. */
if (!mmc_card_keep_power(host))
mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
ret = mmc_send_io_op_cond(host, 0, &ocr);
if (ret)
goto out;
if (host->ocr_avail_sdio)
host->ocr_avail = host->ocr_avail_sdio;
host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
if (!host->ocr) {
ret = -EINVAL;
goto out;
}
if (mmc_host_uhs(host))
/* to query card if 1.8V signalling is supported */
host->ocr |= R4_18V_PRESENT;
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
mmc_signal_sdio_irq(host);
out:
mmc_release_host(host);
return ret;
}
static const struct mmc_bus_ops mmc_sdio_ops = {
.remove = mmc_sdio_remove,
.detect = mmc_sdio_detect,
.suspend = mmc_sdio_suspend,
.resume = mmc_sdio_resume,
.power_restore = mmc_sdio_power_restore,
.alive = mmc_sdio_alive,
};
/*
* Starting point for SDIO card init.
*/
int mmc_attach_sdio(struct mmc_host *host)
{
int err, i, funcs;
u32 ocr;
struct mmc_card *card;
BUG_ON(!host);
WARN_ON(!host->claimed);
err = mmc_send_io_op_cond(host, 0, &ocr);
if (err)
return err;
mmc_attach_bus(host, &mmc_sdio_ops);
if (host->ocr_avail_sdio)
host->ocr_avail = host->ocr_avail_sdio;
/*
* Sanity check the voltages that the card claims to
* support.
*/
if (ocr & 0x7F) {
pr_warning("%s: card claims to support voltages "
"below the defined range. These will be ignored.\n",
mmc_hostname(host));
ocr &= ~0x7F;
}
host->ocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
if (!host->ocr) {
err = -EINVAL;
goto err;
}
/*
* Detect and init the card.
*/
if (mmc_host_uhs(host))
/* to query card if 1.8V signalling is supported */
host->ocr |= R4_18V_PRESENT;
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
if (err) {
if (err == -EAGAIN) {
/*
* Retry initialization with S18R set to 0.
*/
host->ocr &= ~R4_18V_PRESENT;
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
}
if (err)
goto err;
}
card = host->card;
/*
* Enable runtime PM only if supported by host+card+board
*/
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
/*
* Let runtime PM core know our card is active
*/
err = pm_runtime_set_active(&card->dev);
if (err)
goto remove;
/*
* Enable runtime PM for this card
*/
pm_runtime_enable(&card->dev);
}
/*
* The number of functions on the card is encoded inside
* the ocr.
*/
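/*
 * Worked example (an illustrative note, not from the original source): the
 * mask below extracts bits 30:28 of the R4 OCR response, which carry the
 * number of I/O functions; an ocr of 0x20ff8000 would therefore yield
 * funcs = 2.
 */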
funcs = (ocr & 0x70000000) >> 28;
card->sdio_funcs = 0;
#ifdef CONFIG_MMC_EMBEDDED_SDIO
if (host->embedded_sdio_data.funcs)
card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
#endif
/*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
#ifdef CONFIG_MMC_EMBEDDED_SDIO
if (host->embedded_sdio_data.funcs) {
struct sdio_func *tmp;
tmp = sdio_alloc_func(host->card);
if (IS_ERR(tmp))
goto remove;
tmp->num = (i + 1);
card->sdio_func[i] = tmp;
tmp->class = host->embedded_sdio_data.funcs[i].f_class;
tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
tmp->vendor = card->cis.vendor;
tmp->device = card->cis.device;
} else {
#endif
err = sdio_init_func(host->card, i + 1);
if (err)
goto remove;
#ifdef CONFIG_MMC_EMBEDDED_SDIO
}
#endif
/*
* Enable Runtime PM for this func (if supported)
*/
if (host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_enable(&card->sdio_func[i]->dev);
}
/*
* First add the card to the driver model...
*/
mmc_release_host(host);
err = mmc_add_card(host->card);
if (err)
goto remove_added;
/*
* ...then the SDIO functions.
*/
for (i = 0;i < funcs;i++) {
err = sdio_add_func(host->card->sdio_func[i]);
if (err)
goto remove_added;
}
mmc_claim_host(host);
return 0;
remove_added:
/* Remove without lock if the device has been added. */
mmc_sdio_remove(host);
mmc_claim_host(host);
remove:
/* And with lock if it hasn't been added. */
mmc_release_host(host);
if (host->card)
mmc_sdio_remove(host);
mmc_claim_host(host);
err:
mmc_detach_bus(host);
pr_err("%s: error %d whilst initialising SDIO card\n",
mmc_hostname(host), err);
return err;
}
int sdio_reset_comm(struct mmc_card *card)
{
return mmc_power_restore_host(card->host);
}
EXPORT_SYMBOL(sdio_reset_comm);
| gpl-2.0 |
jyh0082007/sigTaint | fs/cifs/smb2inode.c | 331 | 7721 | /*
* fs/cifs/smb2inode.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
* Author(s): Pavel Shilovsky (pshilovsky@samba.org),
* Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
#include "fscache.h"
#include "smb2glob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
static int
smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
__u32 desired_access, __u32 create_disposition,
__u32 create_options, void *data, int command)
{
int rc, tmprc = 0;
__le16 *utf16_path;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
if (!utf16_path)
return -ENOMEM;
oparms.tcon = tcon;
oparms.desired_access = desired_access;
oparms.disposition = create_disposition;
oparms.create_options = create_options;
oparms.fid = &fid;
oparms.reconnect = false;
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
if (rc) {
kfree(utf16_path);
return rc;
}
switch (command) {
case SMB2_OP_DELETE:
break;
case SMB2_OP_QUERY_INFO:
tmprc = SMB2_query_info(xid, tcon, fid.persistent_fid,
fid.volatile_fid,
(struct smb2_file_all_info *)data);
break;
case SMB2_OP_MKDIR:
/*
* Directories are created through parameters in the
* SMB2_open() call.
*/
break;
case SMB2_OP_RENAME:
tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
fid.volatile_fid, (__le16 *)data);
break;
case SMB2_OP_HARDLINK:
tmprc = SMB2_set_hardlink(xid, tcon, fid.persistent_fid,
fid.volatile_fid, (__le16 *)data);
break;
case SMB2_OP_SET_EOF:
tmprc = SMB2_set_eof(xid, tcon, fid.persistent_fid,
fid.volatile_fid, current->tgid,
(__le64 *)data);
break;
case SMB2_OP_SET_INFO:
tmprc = SMB2_set_info(xid, tcon, fid.persistent_fid,
fid.volatile_fid,
(FILE_BASIC_INFO *)data);
break;
default:
cifs_dbg(VFS, "Invalid command\n");
break;
}
rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
if (tmprc)
rc = tmprc;
kfree(utf16_path);
return rc;
}
void
move_smb2_info_to_cifs(FILE_ALL_INFO *dst, struct smb2_file_all_info *src)
{
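/*
 * The memcpy relies on the fields up to CurrentByteOffset having the
 * same layout in FILE_ALL_INFO and struct smb2_file_all_info; the
 * remaining fields are copied individually.
 */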
memcpy(dst, src, (size_t)(&src->CurrentByteOffset) - (size_t)src);
dst->CurrentByteOffset = src->CurrentByteOffset;
dst->Mode = src->Mode;
dst->AlignmentRequirement = src->AlignmentRequirement;
dst->IndexNumber1 = 0; /* we don't use it */
}
int
smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
FILE_ALL_INFO *data, bool *adjust_tz, bool *symlink)
{
int rc;
struct smb2_file_all_info *smb2_data;
*adjust_tz = false;
*symlink = false;
smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN, 0,
smb2_data, SMB2_OP_QUERY_INFO);
if (rc == -EOPNOTSUPP) {
*symlink = true;
/* Failed on a symbolic link - query the reparse point info */
rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
OPEN_REPARSE_POINT, smb2_data,
SMB2_OP_QUERY_INFO);
}
if (rc)
goto out;
move_smb2_info_to_cifs(data, smb2_data);
out:
kfree(smb2_data);
return rc;
}
int
smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
return smb2_open_op_close(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
}
void
smb2_mkdir_setinfo(struct inode *inode, const char *name,
struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
const unsigned int xid)
{
FILE_BASIC_INFO data;
struct cifsInodeInfo *cifs_i;
u32 dosattrs;
int tmprc;
memset(&data, 0, sizeof(data));
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
tmprc = smb2_open_op_close(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
if (tmprc == 0)
cifs_i->cifsAttrs = dosattrs;
}
int
smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
NULL, SMB2_OP_DELETE);
}
int
smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
NULL, SMB2_OP_DELETE);
}
static int
smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
const char *from_name, const char *to_name,
struct cifs_sb_info *cifs_sb, __u32 access, int command)
{
__le16 *smb2_to_name = NULL;
int rc;
smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb);
if (smb2_to_name == NULL) {
rc = -ENOMEM;
goto smb2_rename_path;
}
rc = smb2_open_op_close(xid, tcon, cifs_sb, from_name, access,
FILE_OPEN, 0, smb2_to_name, command);
smb2_rename_path:
kfree(smb2_to_name);
return rc;
}
int
smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
const char *from_name, const char *to_name,
struct cifs_sb_info *cifs_sb)
{
return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
DELETE, SMB2_OP_RENAME);
}
int
smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
const char *from_name, const char *to_name,
struct cifs_sb_info *cifs_sb)
{
return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK);
}
int
smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
const char *full_path, __u64 size,
struct cifs_sb_info *cifs_sb, bool set_alloc)
{
__le64 eof = cpu_to_le64(size);
return smb2_open_op_close(xid, tcon, cifs_sb, full_path,
FILE_WRITE_DATA, FILE_OPEN, 0, &eof,
SMB2_OP_SET_EOF);
}
int
smb2_set_file_info(struct inode *inode, const char *full_path,
FILE_BASIC_INFO *buf, const unsigned int xid)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
int rc;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
SMB2_OP_SET_INFO);
cifs_put_tlink(tlink);
return rc;
}
| gpl-2.0 |
Jovy23/N930TUVU1APGC_Kernel | net/dsa/tag_edsa.c | 331 | 5131 | /*
* net/dsa/tag_edsa.c - Ethertype DSA tagging
* Copyright (c) 2008-2009 Marvell Semiconductor
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "dsa_priv.h"
#define DSA_HLEN 4
#define EDSA_HLEN 8
static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *edsa_header;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
/*
* Convert the outermost 802.1q tag to a DSA tag and prepend
* a DSA ethertype field if the packet is tagged, or insert
* a DSA ethertype plus DSA tag between the addresses and the
* current ethertype field if the packet is untagged.
*/
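/*
 * Either way the transmitted frame ends up as: DA, SA, the 2-byte
 * EDSA ethertype plus two reserved zero bytes, a 4-byte FROM_CPU DSA
 * tag, and then the original ethertype and payload.
 */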
if (skb->protocol == htons(ETH_P_8021Q)) {
if (skb_cow_head(skb, DSA_HLEN) < 0)
goto out_free;
skb_push(skb, DSA_HLEN);
memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
/*
* Construct tagged FROM_CPU DSA tag from 802.1q tag.
*/
edsa_header = skb->data + 2 * ETH_ALEN;
edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
edsa_header[1] = ETH_P_EDSA & 0xff;
edsa_header[2] = 0x00;
edsa_header[3] = 0x00;
edsa_header[4] = 0x60 | p->parent->index;
edsa_header[5] = p->port << 3;
/*
* Move CFI field from byte 6 to byte 5.
*/
if (edsa_header[6] & 0x10) {
edsa_header[5] |= 0x01;
edsa_header[6] &= ~0x10;
}
} else {
if (skb_cow_head(skb, EDSA_HLEN) < 0)
goto out_free;
skb_push(skb, EDSA_HLEN);
memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
/*
* Construct untagged FROM_CPU DSA tag.
*/
edsa_header = skb->data + 2 * ETH_ALEN;
edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
edsa_header[1] = ETH_P_EDSA & 0xff;
edsa_header[2] = 0x00;
edsa_header[3] = 0x00;
edsa_header[4] = 0x40 | p->parent->index;
edsa_header[5] = p->port << 3;
edsa_header[6] = 0x00;
edsa_header[7] = 0x00;
}
skb->protocol = htons(ETH_P_EDSA);
skb->dev = p->parent->dst->master_netdev;
dev_queue_xmit(skb);
return NETDEV_TX_OK;
out_free:
kfree_skb(skb);
return NETDEV_TX_OK;
}
static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
struct dsa_switch *ds;
u8 *edsa_header;
int source_device;
int source_port;
if (unlikely(dst == NULL))
goto out_drop;
skb = skb_unshare(skb, GFP_ATOMIC);
if (skb == NULL)
goto out;
if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
goto out_drop;
/*
* Skip the two null bytes after the ethertype.
*/
edsa_header = skb->data + 2;
/*
* Check that frame type is either TO_CPU or FORWARD.
*/
if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
goto out_drop;
/*
* Determine source device and port.
*/
source_device = edsa_header[0] & 0x1f;
source_port = (edsa_header[1] >> 3) & 0x1f;
/*
* Check that the source device exists and that the source
* port is a registered DSA port.
*/
if (source_device >= dst->pd->nr_chips)
goto out_drop;
ds = dst->ds[source_device];
if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
goto out_drop;
/*
* If the 'tagged' bit is set, convert the DSA tag to a 802.1q
* tag and delete the ethertype part. If the 'tagged' bit is
* clear, delete the ethertype and the DSA tag parts.
*/
if (edsa_header[0] & 0x20) {
u8 new_header[4];
/*
* Insert 802.1q ethertype and copy the VLAN-related
* fields, but clear the bit that will hold CFI (since
* DSA uses that bit location for another purpose).
*/
new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
new_header[1] = ETH_P_8021Q & 0xff;
new_header[2] = edsa_header[2] & ~0x10;
new_header[3] = edsa_header[3];
/*
* Move CFI bit from its place in the DSA header to
* its 802.1q-designated place.
*/
if (edsa_header[1] & 0x01)
new_header[2] |= 0x10;
skb_pull_rcsum(skb, DSA_HLEN);
/*
* Update packet checksum if skb is CHECKSUM_COMPLETE.
*/
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__wsum c = skb->csum;
c = csum_add(c, csum_partial(new_header + 2, 2, 0));
c = csum_sub(c, csum_partial(edsa_header + 2, 2, 0));
skb->csum = c;
}
memcpy(edsa_header, new_header, DSA_HLEN);
memmove(skb->data - ETH_HLEN,
skb->data - ETH_HLEN - DSA_HLEN,
2 * ETH_ALEN);
} else {
/*
* Remove DSA tag and update checksum.
*/
skb_pull_rcsum(skb, EDSA_HLEN);
memmove(skb->data - ETH_HLEN,
skb->data - ETH_HLEN - EDSA_HLEN,
2 * ETH_ALEN);
}
skb->dev = ds->ports[source_port];
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
return 0;
out_drop:
kfree_skb(skb);
out:
return 0;
}
const struct dsa_device_ops edsa_netdev_ops = {
.xmit = edsa_xmit,
.rcv = edsa_rcv,
};
| gpl-2.0 |
omnirom/android_kernel_asus_tegra3 | sound/soc/tegra/tegra20_spdif.c | 331 | 12175 | /*
* tegra20_spdif.c - Tegra20 SPDIF driver
*
* Author: Stephen Warren <swarren@nvidia.com>
* Copyright (C) 2011 - NVIDIA, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <mach/iomap.h>
#include <mach/hdmi-audio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "tegra20_spdif.h"
#define DRV_NAME "tegra20-spdif"
static inline void tegra20_spdif_write(struct tegra20_spdif *spdif, u32 reg,
u32 val)
{
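/*
 * With CONFIG_PM, every write is mirrored into the control or TX
 * channel-status register cache so tegra20_spdif_resume() can restore
 * the hardware state after a suspend.
 */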
#ifdef CONFIG_PM
if (reg < TEGRA20_SPDIF_CH_STA_TX_A)
spdif->reg_ctrl_cache[reg >> 2] = val;
else
spdif->reg_tx_cache[((reg - TEGRA20_SPDIF_CH_STA_TX_A) >> 2)]
= val;
#endif
__raw_writel(val, spdif->regs + reg);
}
static inline u32 tegra20_spdif_read(struct tegra20_spdif *spdif, u32 reg)
{
return __raw_readl(spdif->regs + reg);
}
#ifdef CONFIG_DEBUG_FS
static int tegra20_spdif_show(struct seq_file *s, void *unused)
{
#define REG(r) { r, #r }
static const struct {
int offset;
const char *name;
} regs[] = {
REG(TEGRA20_SPDIF_CTRL),
REG(TEGRA20_SPDIF_STATUS),
REG(TEGRA20_SPDIF_STROBE_CTRL),
REG(TEGRA20_SPDIF_DATA_FIFO_CSR),
REG(TEGRA20_SPDIF_CH_STA_RX_A),
REG(TEGRA20_SPDIF_CH_STA_RX_B),
REG(TEGRA20_SPDIF_CH_STA_RX_C),
REG(TEGRA20_SPDIF_CH_STA_RX_D),
REG(TEGRA20_SPDIF_CH_STA_RX_E),
REG(TEGRA20_SPDIF_CH_STA_RX_F),
REG(TEGRA20_SPDIF_CH_STA_TX_A),
REG(TEGRA20_SPDIF_CH_STA_TX_B),
REG(TEGRA20_SPDIF_CH_STA_TX_C),
REG(TEGRA20_SPDIF_CH_STA_TX_D),
REG(TEGRA20_SPDIF_CH_STA_TX_E),
REG(TEGRA20_SPDIF_CH_STA_TX_F),
};
#undef REG
struct tegra20_spdif *spdif = s->private;
int i;
for (i = 0; i < ARRAY_SIZE(regs); i++) {
u32 val = tegra20_spdif_read(spdif, regs[i].offset);
seq_printf(s, "%s = %08x\n", regs[i].name, val);
}
return 0;
}
static int tegra20_spdif_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, tegra20_spdif_show, inode->i_private);
}
static const struct file_operations tegra20_spdif_debug_fops = {
.open = tegra20_spdif_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void tegra20_spdif_debug_add(struct tegra20_spdif *spdif)
{
spdif->debug = debugfs_create_file(DRV_NAME, S_IRUGO,
snd_soc_debugfs_root, spdif,
&tegra20_spdif_debug_fops);
}
static void tegra20_spdif_debug_remove(struct tegra20_spdif *spdif)
{
if (spdif->debug)
debugfs_remove(spdif->debug);
}
#else
static inline void tegra20_spdif_debug_add(struct tegra20_spdif *spdif)
{
}
static inline void tegra20_spdif_debug_remove(struct tegra20_spdif *spdif)
{
}
#endif
static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct device *dev = substream->pcm->card->dev;
struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
int ret, srate, spdifclock;
u32 ch_sta[2] = {0, 0};
spdif->reg_ctrl &= ~TEGRA20_SPDIF_CTRL_PACK;
spdif->reg_ctrl &= ~TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_PACK;
spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
break;
default:
return -EINVAL;
}
srate = params_rate(params);
ch_sta[0] = tegra20_spdif_read(spdif, TEGRA20_SPDIF_CH_STA_TX_A);
ch_sta[0] &= ~TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_MASK;
ch_sta[1] = tegra20_spdif_read(spdif, TEGRA20_SPDIF_CH_STA_TX_B);
ch_sta[1] &= ~TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_MASK;
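/* the S/PDIF bit clock is 128 x the sample rate, e.g. 48 kHz -> 6.144 MHz */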
switch (params_rate(params)) {
case 32000:
spdifclock = 4096000;
ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_32000;
ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_32000;
break;
case 44100:
spdifclock = 5644800;
ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_44100;
ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_44100;
break;
case 48000:
spdifclock = 6144000;
ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_48000;
ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_48000;
break;
case 88200:
spdifclock = 11289600;
ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_88200;
ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_88200;
break;
case 96000:
spdifclock = 12288000;
ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_96000;
ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_96000;
break;
case 176400:
spdifclock = 22579200;
ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_176400;
ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_176400;
break;
case 192000:
spdifclock = 24576000;
ch_sta[0] |= TEGRA20_SPDIF_CH_STA_TX_A_SAMP_FREQ_192000;
ch_sta[1] |= TEGRA20_SPDIF_CH_STA_TX_B_ORIG_SAMP_FREQ_192000;
break;
default:
return -EINVAL;
}
ret = clk_set_rate(spdif->clk_spdif_out, spdifclock);
if (ret) {
dev_err(dev, "Can't set SPDIF clock rate: %d\n", ret);
return ret;
}
clk_enable(spdif->clk_spdif_out);
tegra20_spdif_write(spdif, TEGRA20_SPDIF_CH_STA_TX_A, ch_sta[0]);
tegra20_spdif_write(spdif, TEGRA20_SPDIF_CH_STA_TX_B, ch_sta[1]);
clk_disable(spdif->clk_spdif_out);
ret = tegra_hdmi_setup_audio_freq_source(srate, SPDIF);
if (ret) {
dev_err(dev, "Can't set HDMI audio freq source: %d\n", ret);
return ret;
}
return 0;
}
static void tegra20_spdif_start_playback(struct tegra20_spdif *spdif)
{
spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_TC_EN | TEGRA20_SPDIF_CTRL_TX_EN;
tegra20_spdif_write(spdif, TEGRA20_SPDIF_CTRL, spdif->reg_ctrl);
}
static void tegra20_spdif_stop_playback(struct tegra20_spdif *spdif)
{
spdif->reg_ctrl &= ~(TEGRA20_SPDIF_CTRL_TX_EN |
TEGRA20_SPDIF_CTRL_TC_EN);
tegra20_spdif_write(spdif, TEGRA20_SPDIF_CTRL, spdif->reg_ctrl);
}
static int tegra20_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
clk_enable(spdif->clk_spdif_out);
tegra20_spdif_start_playback(spdif);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
tegra20_spdif_stop_playback(spdif);
clk_disable(spdif->clk_spdif_out);
break;
default:
return -EINVAL;
}
return 0;
}
static int tegra20_spdif_probe(struct snd_soc_dai *dai)
{
struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
#ifdef CONFIG_PM
int i, reg;
#endif
dai->capture_dma_data = NULL;
dai->playback_dma_data = &spdif->playback_dma_data;
#ifdef CONFIG_PM
clk_enable(spdif->clk_spdif_out);
/* populate the spdif reg cache with POR values */
for (i = 0; i < TEGRA20_SPDIF_CTRL_CACHE_SIZE; i++)
spdif->reg_ctrl_cache[i] = tegra20_spdif_read(spdif, i << 2);
for (i = 0; i < TEGRA20_SPDIF_TX_CACHE_SIZE; i++) {
reg = (TEGRA20_SPDIF_CH_STA_TX_A) + (i << 2);
spdif->reg_tx_cache[i] = tegra20_spdif_read(spdif, reg);
}
clk_disable(spdif->clk_spdif_out);
#endif
return 0;
}
#ifdef CONFIG_PM
int tegra20_spdif_resume(struct snd_soc_dai *cpu_dai)
{
struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(cpu_dai);
int i, reg;
clk_enable(spdif->clk_spdif_out);
/* restore the spdif regs */
for (i = 0; i < TEGRA20_SPDIF_CTRL_CACHE_SIZE; i++)
tegra20_spdif_write(spdif, i << 2, spdif->reg_ctrl_cache[i]);
for (i = 0; i < TEGRA20_SPDIF_TX_CACHE_SIZE; i++) {
reg = (TEGRA20_SPDIF_CH_STA_TX_A) + (i << 2);
tegra20_spdif_write(spdif, reg, spdif->reg_tx_cache[i]);
}
clk_disable(spdif->clk_spdif_out);
return 0;
}
#else
#define tegra20_spdif_resume NULL
#endif
static struct snd_soc_dai_ops tegra20_spdif_dai_ops = {
.hw_params = tegra20_spdif_hw_params,
.trigger = tegra20_spdif_trigger,
};
struct snd_soc_dai_driver tegra20_spdif_dai = {
.name = DRV_NAME,
.probe = tegra20_spdif_probe,
.resume = tegra20_spdif_resume,
.playback = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &tegra20_spdif_dai_ops,
};
static __devinit int tegra20_spdif_platform_probe(struct platform_device *pdev)
{
struct tegra20_spdif *spdif;
struct resource *mem, *memregion, *dmareq;
int ret;
u32 reg_val;
spdif = kzalloc(sizeof(struct tegra20_spdif), GFP_KERNEL);
if (!spdif) {
dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n");
ret = -ENOMEM;
goto exit;
}
dev_set_drvdata(&pdev->dev, spdif);
spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out");
if (IS_ERR(spdif->clk_spdif_out)) {
pr_err("Can't retrieve spdif clock\n");
ret = PTR_ERR(spdif->clk_spdif_out);
goto err_free;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "No memory resource\n");
ret = -ENODEV;
goto err_clk_put;
}
dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!dmareq) {
dev_err(&pdev->dev, "No DMA resource\n");
ret = -ENODEV;
goto err_clk_put;
}
memregion = request_mem_region(mem->start, resource_size(mem),
DRV_NAME);
if (!memregion) {
dev_err(&pdev->dev, "Memory region already claimed\n");
ret = -EBUSY;
goto err_clk_put;
}
spdif->regs = ioremap(mem->start, resource_size(mem));
if (!spdif->regs) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
goto err_release;
}
spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
spdif->playback_dma_data.wrap = 4;
spdif->playback_dma_data.width = 32;
spdif->playback_dma_data.req_sel = dmareq->start;
clk_enable(spdif->clk_spdif_out);
reg_val = tegra20_spdif_read(spdif, TEGRA20_SPDIF_DATA_FIFO_CSR);
reg_val &= ~TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_MASK;
reg_val |= TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU4_WORD_FULL;
tegra20_spdif_write(spdif, TEGRA20_SPDIF_DATA_FIFO_CSR, reg_val);
clk_disable(spdif->clk_spdif_out);
ret = snd_soc_register_dai(&pdev->dev, &tegra20_spdif_dai);
if (ret) {
dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
ret = -ENOMEM;
goto err_unmap;
}
tegra20_spdif_debug_add(spdif);
return 0;
err_unmap:
iounmap(spdif->regs);
err_release:
release_mem_region(mem->start, resource_size(mem));
err_clk_put:
clk_put(spdif->clk_spdif_out);
err_free:
kfree(spdif);
exit:
return ret;
}
static int __devexit tegra20_spdif_platform_remove(struct platform_device *pdev)
{
struct tegra20_spdif *spdif = dev_get_drvdata(&pdev->dev);
struct resource *res;
snd_soc_unregister_dai(&pdev->dev);
tegra20_spdif_debug_remove(spdif);
iounmap(spdif->regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
clk_put(spdif->clk_spdif_out);
kfree(spdif);
return 0;
}
static struct platform_driver tegra20_spdif_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = tegra20_spdif_platform_probe,
.remove = __devexit_p(tegra20_spdif_platform_remove),
};
static int __init snd_tegra20_spdif_init(void)
{
return platform_driver_register(&tegra20_spdif_driver);
}
module_init(snd_tegra20_spdif_init);
static void __exit snd_tegra20_spdif_exit(void)
{
platform_driver_unregister(&tegra20_spdif_driver);
}
module_exit(snd_tegra20_spdif_exit);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("Tegra SPDIF ASoC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
dtuchsch/rpi-linux-preempt_rt | net/netfilter/ipset/ip_set_hash_netportnet.c | 331 | 16937 | /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Kernel module implementing an IP set type: the hash:net,port,net type */
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
/* 0 Comments support added */
/* 1 Forceadd support added */
#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
IP_SET_MODULE_DESC("hash:net,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,port,net");
/* Type specific function prefix */
#define HTYPE hash_netportnet
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
#define IPSET_NET_COUNT 2
/* IPv4 variant */
/* Member elements */
struct hash_netportnet4_elem {
union {
__be32 ip[2];
__be64 ipcmp;
};
__be16 port;
union {
u8 cidr[2];
u16 ccmp;
};
u8 nomatch:1;
u8 proto;
};
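/*
 * The ip[2]/ipcmp and cidr[2]/ccmp unions let both addresses and both
 * prefix lengths be compared with a single 64-bit and 16-bit compare in
 * hash_netportnet4_data_equal().
 */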
/* Common functions */
static inline bool
hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
const struct hash_netportnet4_elem *ip2,
u32 *multi)
{
return ip1->ipcmp == ip2->ipcmp &&
ip1->ccmp == ip2->ccmp &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static inline int
hash_netportnet4_do_data_match(const struct hash_netportnet4_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static inline void
hash_netportnet4_data_set_flags(struct hash_netportnet4_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static inline void
hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static inline void
hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
struct hash_netportnet4_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
static inline void
hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,
u8 cidr, bool inner)
{
if (inner) {
elem->ip[1] &= ip_set_netmask(cidr);
elem->cidr[1] = cidr;
} else {
elem->ip[0] &= ip_set_netmask(cidr);
elem->cidr[0] = cidr;
}
}
static bool
hash_netportnet4_data_list(struct sk_buff *skb,
const struct hash_netportnet4_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
}
static inline void
hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
const struct hash_netportnet4_elem *d)
{
next->ipcmp = d->ipcmp;
next->port = d->port;
}
#define MTYPE hash_netportnet4
#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
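/* a TEST lookup element always carries full host masks in both slots */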
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
e.ip[0] &= ip_set_netmask(e.cidr[0]);
e.ip[1] &= ip_set_netmask(e.cidr[1]);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
bool with_ports = false;
u8 cidr, cidr2;
int ret;
e.cidr[0] = e.cidr[1] = HOST_MASK;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr[0] = cidr;
}
if (tb[IPSET_ATTR_CIDR2]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
e.cidr[1] = cidr;
}
if (tb[IPSET_ATTR_PORT])
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || e.proto == IPPROTO_ICMP))
e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) {
e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
return ret;
if (ip > ip_to)
swap(ip, ip_to);
if (unlikely(ip + UINT_MAX == ip_to))
return -IPSET_ERR_HASH_RANGE;
} else
ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
port_to = port = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) {
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
}
ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
if (ret)
return ret;
if (ip2_from > ip2_to)
swap(ip2_from, ip2_to);
if (unlikely(ip2_from + UINT_MAX == ip2_to))
return -IPSET_ERR_HASH_RANGE;
} else
ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
if (retried)
ip = ntohl(h->next.ip[0]);
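/*
 * Expand the request: the outer loop walks the first address range as
 * CIDR blocks, the middle loop walks the port range and the inner loop
 * walks the second address range, adding one element per tuple.  On a
 * retried operation the walk resumes from h->next.
 */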
while (!after(ip, ip_to)) {
e.ip[0] = htonl(ip);
ip_last = ip_set_range_to_cidr(ip, ip_to, &cidr);
e.cidr[0] = cidr;
p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++) {
e.port = htons(p);
ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
: ip2_from;
while (!after(ip2, ip2_to)) {
e.ip[1] = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&cidr2);
e.cidr[1] = cidr2;
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;
ip2 = ip2_last + 1;
}
}
ip = ip_last + 1;
}
return ret;
}
/* IPv6 variant */
struct hash_netportnet6_elem {
union nf_inet_addr ip[2];
__be16 port;
union {
u8 cidr[2];
u16 ccmp;
};
u8 nomatch:1;
u8 proto;
};
/* Common functions */
static inline bool
hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
const struct hash_netportnet6_elem *ip2,
u32 *multi)
{
return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
ip1->ccmp == ip2->ccmp &&
ip1->port == ip2->port &&
ip1->proto == ip2->proto;
}
static inline int
hash_netportnet6_do_data_match(const struct hash_netportnet6_elem *elem)
{
return elem->nomatch ? -ENOTEMPTY : 1;
}
static inline void
hash_netportnet6_data_set_flags(struct hash_netportnet6_elem *elem, u32 flags)
{
elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
}
static inline void
hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)
{
swap(*flags, elem->nomatch);
}
static inline void
hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
struct hash_netportnet6_elem *orig)
{
elem->ip[1] = orig->ip[1];
}
static inline void
hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,
u8 cidr, bool inner)
{
if (inner) {
ip6_netmask(&elem->ip[1], cidr);
elem->cidr[1] = cidr;
} else {
ip6_netmask(&elem->ip[0], cidr);
elem->cidr[0] = cidr;
}
}
static bool
hash_netportnet6_data_list(struct sk_buff *skb,
const struct hash_netportnet6_elem *data)
{
u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
return 0;
nla_put_failure:
return 1;
}
static inline void
hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
const struct hash_netportnet6_elem *d)
{
next->port = d->port;
}
#undef MTYPE
#undef PF
#undef HOST_MASK
#define MTYPE hash_netportnet6
#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
e.cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e.cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
if (adt == IPSET_TEST)
e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&e.port, &e.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
static int
hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
int ret;
e.cidr[0] = e.cidr[1] = HOST_MASK;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
if (tb[IPSET_ATTR_CIDR])
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (tb[IPSET_ATTR_CIDR2])
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (unlikely(!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
e.cidr[1] > HOST_MASK))
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);
if (tb[IPSET_ATTR_PORT])
e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
else
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
with_ports = ip_set_proto_with_ports(e.proto);
if (e.proto == 0)
return -IPSET_ERR_INVALID_PROTO;
} else
return -IPSET_ERR_MISSING_PROTO;
if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e.port = 0;
if (tb[IPSET_ATTR_CADT_FLAGS]) {
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_NOMATCH)
flags |= (IPSET_FLAG_NOMATCH << 16);
}
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
port = ntohs(e.port);
port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
if (port > port_to)
swap(port, port_to);
if (retried)
port = ntohs(h->next.port);
for (; port <= port_to; port++) {
e.port = htons(port);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
else
ret = 0;
}
return ret;
}
static struct ip_set_type hash_netportnet_type __read_mostly = {
.name = "hash:net,port,net",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
IPSET_TYPE_NOMATCH,
.dimension = IPSET_DIM_THREE,
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
.create = hash_netportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
[IPSET_ATTR_PROBES] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2] = { .type = NLA_NESTED },
[IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_PORT] = { .type = NLA_U16 },
[IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
static int __init
hash_netportnet_init(void)
{
return ip_set_type_register(&hash_netportnet_type);
}
static void __exit
hash_netportnet_fini(void)
{
ip_set_type_unregister(&hash_netportnet_type);
}
module_init(hash_netportnet_init);
module_exit(hash_netportnet_fini);
| gpl-2.0 |
DevriesL/HeroQLTE_ImageBreaker | drivers/staging/comedi/drivers/ni_65xx.c | 331 | 23825 | /*
* ni_65xx.c
* Comedi driver for National Instruments PCI-65xx static dio boards
*
* Copyright (C) 2006 Jon Grierson <jd@renko.co.uk>
* Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999,2002,2003 David A. Schleef <ds@schleef.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Driver: ni_65xx
* Description: National Instruments 65xx static dio boards
* Author: Jon Grierson <jd@renko.co.uk>,
* Frank Mori Hess <fmhess@users.sourceforge.net>
* Status: testing
* Devices: (National Instruments) PCI-6509 [ni_65xx]
* (National Instruments) PXI-6509 [ni_65xx]
* (National Instruments) PCI-6510 [ni_65xx]
* (National Instruments) PCI-6511 [ni_65xx]
* (National Instruments) PXI-6511 [ni_65xx]
* (National Instruments) PCI-6512 [ni_65xx]
* (National Instruments) PXI-6512 [ni_65xx]
* (National Instruments) PCI-6513 [ni_65xx]
* (National Instruments) PXI-6513 [ni_65xx]
* (National Instruments) PCI-6514 [ni_65xx]
* (National Instruments) PXI-6514 [ni_65xx]
* (National Instruments) PCI-6515 [ni_65xx]
* (National Instruments) PXI-6515 [ni_65xx]
* (National Instruments) PCI-6516 [ni_65xx]
* (National Instruments) PCI-6517 [ni_65xx]
* (National Instruments) PCI-6518 [ni_65xx]
* (National Instruments) PCI-6519 [ni_65xx]
* (National Instruments) PCI-6520 [ni_65xx]
* (National Instruments) PCI-6521 [ni_65xx]
* (National Instruments) PXI-6521 [ni_65xx]
* (National Instruments) PCI-6528 [ni_65xx]
* (National Instruments) PXI-6528 [ni_65xx]
* Updated: Mon, 21 Jul 2014 12:49:58 +0000
*
* Configuration Options: not applicable, uses PCI auto config
*
* Based on the PCI-6527 driver by ds.
* The interrupt subdevice (subdevice 3) is probably broken for all
* boards except maybe the 6514.
*
* This driver previously inverted the outputs on PCI-6513 through to
* PCI-6519 and on PXI-6513 through to PXI-6515. It no longer inverts
* outputs on those cards by default as it didn't make much sense. If
* you require the outputs to be inverted on those cards for legacy
* reasons, set the module parameter "legacy_invert_outputs=true" when
* loading the module, or set "ni_65xx.legacy_invert_outputs=true" on
* the kernel command line if the driver is built in to the kernel.
*/
/*
* Manuals (available from ftp://ftp.natinst.com/support/manuals)
*
* 370106b.pdf 6514 Register Level Programmer Manual
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_fc.h"
/*
* PCI BAR1 Register Map
*/
/* Non-recurring Registers (8-bit except where noted) */
#define NI_65XX_ID_REG 0x00
#define NI_65XX_CLR_REG 0x01
#define NI_65XX_CLR_WDOG_INT (1 << 6)
#define NI_65XX_CLR_WDOG_PING (1 << 5)
#define NI_65XX_CLR_WDOG_EXP (1 << 4)
#define NI_65XX_CLR_EDGE_INT (1 << 3)
#define NI_65XX_CLR_OVERFLOW_INT (1 << 2)
#define NI_65XX_STATUS_REG 0x02
#define NI_65XX_STATUS_WDOG_INT (1 << 5)
#define NI_65XX_STATUS_FALL_EDGE (1 << 4)
#define NI_65XX_STATUS_RISE_EDGE (1 << 3)
#define NI_65XX_STATUS_INT (1 << 2)
#define NI_65XX_STATUS_OVERFLOW_INT (1 << 1)
#define NI_65XX_STATUS_EDGE_INT (1 << 0)
#define NI_65XX_CTRL_REG 0x03
#define NI_65XX_CTRL_WDOG_ENA (1 << 5)
#define NI_65XX_CTRL_FALL_EDGE_ENA (1 << 4)
#define NI_65XX_CTRL_RISE_EDGE_ENA (1 << 3)
#define NI_65XX_CTRL_INT_ENA (1 << 2)
#define NI_65XX_CTRL_OVERFLOW_ENA (1 << 1)
#define NI_65XX_CTRL_EDGE_ENA (1 << 0)
#define NI_65XX_REV_REG 0x04 /* 32-bit */
#define NI_65XX_FILTER_REG 0x08 /* 32-bit */
#define NI_65XX_RTSI_ROUTE_REG 0x0c /* 16-bit */
#define NI_65XX_RTSI_EDGE_REG 0x0e /* 16-bit */
#define NI_65XX_RTSI_WDOG_REG 0x10 /* 16-bit */
#define NI_65XX_RTSI_TRIG_REG 0x12 /* 16-bit */
#define NI_65XX_AUTO_CLK_SEL_REG 0x14 /* PXI-6528 only */
#define NI_65XX_AUTO_CLK_SEL_STATUS (1 << 1)
#define NI_65XX_AUTO_CLK_SEL_DISABLE (1 << 0)
#define NI_65XX_WDOG_CTRL_REG 0x15
#define NI_65XX_WDOG_CTRL_ENA (1 << 0)
#define NI_65XX_RTSI_CFG_REG 0x16
#define NI_65XX_RTSI_CFG_RISE_SENSE (1 << 2)
#define NI_65XX_RTSI_CFG_FALL_SENSE (1 << 1)
#define NI_65XX_RTSI_CFG_SYNC_DETECT (1 << 0)
#define NI_65XX_WDOG_STATUS_REG 0x17
#define NI_65XX_WDOG_STATUS_EXP (1 << 0)
#define NI_65XX_WDOG_INTERVAL_REG 0x18 /* 32-bit */
/* Recurring port registers (8-bit) */
#define NI_65XX_PORT(x) ((x) * 0x10)
#define NI_65XX_IO_DATA_REG(x) (0x40 + NI_65XX_PORT(x))
#define NI_65XX_IO_SEL_REG(x) (0x41 + NI_65XX_PORT(x))
#define NI_65XX_IO_SEL_OUTPUT (0 << 0)
#define NI_65XX_IO_SEL_INPUT (1 << 0)
#define NI_65XX_RISE_EDGE_ENA_REG(x) (0x42 + NI_65XX_PORT(x))
#define NI_65XX_FALL_EDGE_ENA_REG(x) (0x43 + NI_65XX_PORT(x))
#define NI_65XX_FILTER_ENA(x) (0x44 + NI_65XX_PORT(x))
#define NI_65XX_WDOG_HIZ_REG(x) (0x46 + NI_65XX_PORT(x))
#define NI_65XX_WDOG_ENA(x) (0x47 + NI_65XX_PORT(x))
#define NI_65XX_WDOG_HI_LO_REG(x) (0x48 + NI_65XX_PORT(x))
#define NI_65XX_RTSI_ENA(x) (0x49 + NI_65XX_PORT(x))
#define NI_65XX_PORT_TO_CHAN(x) ((x) * 8)
#define NI_65XX_CHAN_TO_PORT(x) ((x) / 8)
#define NI_65XX_CHAN_TO_MASK(x) (1 << ((x) % 8))
enum ni_65xx_boardid {
BOARD_PCI6509,
BOARD_PXI6509,
BOARD_PCI6510,
BOARD_PCI6511,
BOARD_PXI6511,
BOARD_PCI6512,
BOARD_PXI6512,
BOARD_PCI6513,
BOARD_PXI6513,
BOARD_PCI6514,
BOARD_PXI6514,
BOARD_PCI6515,
BOARD_PXI6515,
BOARD_PCI6516,
BOARD_PCI6517,
BOARD_PCI6518,
BOARD_PCI6519,
BOARD_PCI6520,
BOARD_PCI6521,
BOARD_PXI6521,
BOARD_PCI6528,
BOARD_PXI6528,
};
struct ni_65xx_board {
const char *name;
unsigned num_dio_ports;
unsigned num_di_ports;
unsigned num_do_ports;
unsigned legacy_invert:1;
};
static const struct ni_65xx_board ni_65xx_boards[] = {
[BOARD_PCI6509] = {
.name = "pci-6509",
.num_dio_ports = 12,
},
[BOARD_PXI6509] = {
.name = "pxi-6509",
.num_dio_ports = 12,
},
[BOARD_PCI6510] = {
.name = "pci-6510",
.num_di_ports = 4,
},
[BOARD_PCI6511] = {
.name = "pci-6511",
.num_di_ports = 8,
},
[BOARD_PXI6511] = {
.name = "pxi-6511",
.num_di_ports = 8,
},
[BOARD_PCI6512] = {
.name = "pci-6512",
.num_do_ports = 8,
},
[BOARD_PXI6512] = {
.name = "pxi-6512",
.num_do_ports = 8,
},
[BOARD_PCI6513] = {
.name = "pci-6513",
.num_do_ports = 8,
.legacy_invert = 1,
},
[BOARD_PXI6513] = {
.name = "pxi-6513",
.num_do_ports = 8,
.legacy_invert = 1,
},
[BOARD_PCI6514] = {
.name = "pci-6514",
.num_di_ports = 4,
.num_do_ports = 4,
.legacy_invert = 1,
},
[BOARD_PXI6514] = {
.name = "pxi-6514",
.num_di_ports = 4,
.num_do_ports = 4,
.legacy_invert = 1,
},
[BOARD_PCI6515] = {
.name = "pci-6515",
.num_di_ports = 4,
.num_do_ports = 4,
.legacy_invert = 1,
},
[BOARD_PXI6515] = {
.name = "pxi-6515",
.num_di_ports = 4,
.num_do_ports = 4,
.legacy_invert = 1,
},
[BOARD_PCI6516] = {
.name = "pci-6516",
.num_do_ports = 4,
.legacy_invert = 1,
},
[BOARD_PCI6517] = {
.name = "pci-6517",
.num_do_ports = 4,
.legacy_invert = 1,
},
[BOARD_PCI6518] = {
.name = "pci-6518",
.num_di_ports = 2,
.num_do_ports = 2,
.legacy_invert = 1,
},
[BOARD_PCI6519] = {
.name = "pci-6519",
.num_di_ports = 2,
.num_do_ports = 2,
.legacy_invert = 1,
},
[BOARD_PCI6520] = {
.name = "pci-6520",
.num_di_ports = 1,
.num_do_ports = 1,
},
[BOARD_PCI6521] = {
.name = "pci-6521",
.num_di_ports = 1,
.num_do_ports = 1,
},
[BOARD_PXI6521] = {
.name = "pxi-6521",
.num_di_ports = 1,
.num_do_ports = 1,
},
[BOARD_PCI6528] = {
.name = "pci-6528",
.num_di_ports = 3,
.num_do_ports = 3,
},
[BOARD_PXI6528] = {
.name = "pxi-6528",
.num_di_ports = 3,
.num_do_ports = 3,
},
};
static bool ni_65xx_legacy_invert_outputs;
module_param_named(legacy_invert_outputs, ni_65xx_legacy_invert_outputs,
bool, 0444);
MODULE_PARM_DESC(legacy_invert_outputs,
"invert outputs of PCI/PXI-6513/6514/6515/6516/6517/6518/6519 for compatibility with old user code");
static unsigned int ni_65xx_num_ports(struct comedi_device *dev)
{
const struct ni_65xx_board *board = dev->board_ptr;
return board->num_dio_ports + board->num_di_ports + board->num_do_ports;
}
static void ni_65xx_disable_input_filters(struct comedi_device *dev)
{
unsigned int num_ports = ni_65xx_num_ports(dev);
int i;
/* disable input filtering on all ports */
for (i = 0; i < num_ports; ++i)
writeb(0x00, dev->mmio + NI_65XX_FILTER_ENA(i));
/* set filter interval to 0 (32bit reg) */
writel(0x00000000, dev->mmio + NI_65XX_FILTER_REG);
}
/*
 * Update edge detection for channels base_chan to base_chan+31.  The
 * rising/falling masks are relative to base_chan and are shifted into
 * the 8-bit per-port edge-enable registers, preserving the bits of any
 * port that is only partially covered.
 */
static void ni_65xx_update_edge_detection(struct comedi_device *dev,
unsigned int base_chan,
unsigned int rising,
unsigned int falling)
{
unsigned int num_ports = ni_65xx_num_ports(dev);
unsigned int port;
if (base_chan >= NI_65XX_PORT_TO_CHAN(num_ports))
return;
for (port = NI_65XX_CHAN_TO_PORT(base_chan); port < num_ports; port++) {
int bitshift = (int)(NI_65XX_PORT_TO_CHAN(port) - base_chan);
unsigned int port_mask, port_rising, port_falling;
if (bitshift >= 32)
break;
if (bitshift >= 0) {
port_mask = ~0U >> bitshift;
port_rising = rising >> bitshift;
port_falling = falling >> bitshift;
} else {
port_mask = ~0U << -bitshift;
port_rising = rising << -bitshift;
port_falling = falling << -bitshift;
}
if (port_mask & 0xff) {
if (~port_mask & 0xff) {
port_rising |=
readb(dev->mmio +
NI_65XX_RISE_EDGE_ENA_REG(port)) &
~port_mask;
port_falling |=
readb(dev->mmio +
NI_65XX_FALL_EDGE_ENA_REG(port)) &
~port_mask;
}
writeb(port_rising & 0xff,
dev->mmio + NI_65XX_RISE_EDGE_ENA_REG(port));
writeb(port_falling & 0xff,
dev->mmio + NI_65XX_FALL_EDGE_ENA_REG(port));
}
}
}
static void ni_65xx_disable_edge_detection(struct comedi_device *dev)
{
/* clear edge detection for channels 0 to 31 */
ni_65xx_update_edge_detection(dev, 0, 0, 0);
/* clear edge detection for channels 32 to 63 */
ni_65xx_update_edge_detection(dev, 32, 0, 0);
/* clear edge detection for channels 64 to 95 */
ni_65xx_update_edge_detection(dev, 64, 0, 0);
}
static int ni_65xx_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long base_port = (unsigned long)s->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int chan_mask = NI_65XX_CHAN_TO_MASK(chan);
unsigned port = base_port + NI_65XX_CHAN_TO_PORT(chan);
unsigned int interval;
unsigned int val;
switch (data[0]) {
case INSN_CONFIG_FILTER:
/*
* The deglitch filter interval is specified in nanoseconds.
* The hardware supports intervals in 200ns increments. Round
* the user value to the nearest 200ns step and return the
* actual interval.
*/
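/*
 * For example, a requested 450 ns gives
 * interval = (450 + 100) / 200 = 2, so 400 ns is written back to
 * data[1].
 */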
interval = (data[1] + 100) / 200;
if (interval > 0xfffff)
interval = 0xfffff;
data[1] = interval * 200;
/*
* Enable/disable the channel for deglitch filtering. Note
* that the filter interval is never set to '0'. This is done
* because other channels might still be enabled for filtering.
*/
val = readb(dev->mmio + NI_65XX_FILTER_ENA(port));
if (interval) {
writel(interval, dev->mmio + NI_65XX_FILTER_REG);
val |= chan_mask;
} else {
val &= ~chan_mask;
}
writeb(val, dev->mmio + NI_65XX_FILTER_ENA(port));
break;
case INSN_CONFIG_DIO_OUTPUT:
if (s->type != COMEDI_SUBD_DIO)
return -EINVAL;
writeb(NI_65XX_IO_SEL_OUTPUT,
dev->mmio + NI_65XX_IO_SEL_REG(port));
break;
case INSN_CONFIG_DIO_INPUT:
if (s->type != COMEDI_SUBD_DIO)
return -EINVAL;
writeb(NI_65XX_IO_SEL_INPUT,
dev->mmio + NI_65XX_IO_SEL_REG(port));
break;
case INSN_CONFIG_DIO_QUERY:
if (s->type != COMEDI_SUBD_DIO)
return -EINVAL;
val = readb(dev->mmio + NI_65XX_IO_SEL_REG(port));
data[1] = (val == NI_65XX_IO_SEL_INPUT) ? COMEDI_INPUT
: COMEDI_OUTPUT;
break;
default:
return -EINVAL;
}
return insn->n;
}
static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned long base_port = (unsigned long)s->private;
unsigned int base_chan = CR_CHAN(insn->chanspec);
int last_port_offset = NI_65XX_CHAN_TO_PORT(s->n_chan - 1);
unsigned read_bits = 0;
int port_offset;
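/*
 * data[0]/data[1] form a mask/value pair covering up to 32 channels
 * starting at base_chan.  Walk every 8-bit port that overlaps that
 * window, update the masked output bits and accumulate the read-back
 * state; s->io_bits is XORed in both directions to handle the legacy
 * inverted-output boards.
 */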
for (port_offset = NI_65XX_CHAN_TO_PORT(base_chan);
port_offset <= last_port_offset; port_offset++) {
unsigned port = base_port + port_offset;
int base_port_channel = NI_65XX_PORT_TO_CHAN(port_offset);
unsigned port_mask, port_data, bits;
int bitshift = base_port_channel - base_chan;
if (bitshift >= 32)
break;
port_mask = data[0];
port_data = data[1];
if (bitshift > 0) {
port_mask >>= bitshift;
port_data >>= bitshift;
} else {
port_mask <<= -bitshift;
port_data <<= -bitshift;
}
port_mask &= 0xff;
port_data &= 0xff;
/* update the outputs */
if (port_mask) {
bits = readb(dev->mmio + NI_65XX_IO_DATA_REG(port));
bits ^= s->io_bits; /* invert if necessary */
bits &= ~port_mask;
bits |= (port_data & port_mask);
bits ^= s->io_bits; /* invert back */
writeb(bits, dev->mmio + NI_65XX_IO_DATA_REG(port));
}
/* read back the actual state */
bits = readb(dev->mmio + NI_65XX_IO_DATA_REG(port));
bits ^= s->io_bits; /* invert if necessary */
if (bitshift > 0)
bits <<= bitshift;
else
bits >>= -bitshift;
read_bits |= bits;
}
data[1] = read_bits;
return insn->n;
}
static irqreturn_t ni_65xx_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->read_subdev;
unsigned int status;
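/*
 * The handler only acknowledges the edge/overflow interrupt and queues
 * a single dummy sample so that a running async command sees an
 * end-of-scan event.
 */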
status = readb(dev->mmio + NI_65XX_STATUS_REG);
if ((status & NI_65XX_STATUS_INT) == 0)
return IRQ_NONE;
if ((status & NI_65XX_STATUS_EDGE_INT) == 0)
return IRQ_NONE;
writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
dev->mmio + NI_65XX_CLR_REG);
comedi_buf_put(s, 0);
s->async->events |= COMEDI_CB_EOS;
comedi_event(dev, s);
return IRQ_HANDLED;
}
static int ni_65xx_intr_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_OTHER);
err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_FOLLOW);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* Step 4: fix up any arguments */
/* Step 5: check channel list if it exists */
return 0;
}
static int ni_65xx_intr_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
dev->mmio + NI_65XX_CLR_REG);
writeb(NI_65XX_CTRL_FALL_EDGE_ENA | NI_65XX_CTRL_RISE_EDGE_ENA |
NI_65XX_CTRL_INT_ENA | NI_65XX_CTRL_EDGE_ENA,
dev->mmio + NI_65XX_CTRL_REG);
return 0;
}
static int ni_65xx_intr_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
writeb(0x00, dev->mmio + NI_65XX_CTRL_REG);
return 0;
}
static int ni_65xx_intr_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = 0;
return insn->n;
}
static int ni_65xx_intr_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
switch (data[0]) {
case INSN_CONFIG_CHANGE_NOTIFY:
/* add instruction to check_insn_config_length() */
if (insn->n != 3)
return -EINVAL;
/* update edge detection for channels 0 to 31 */
ni_65xx_update_edge_detection(dev, 0, data[1], data[2]);
/* clear edge detection for channels 32 to 63 */
ni_65xx_update_edge_detection(dev, 32, 0, 0);
/* clear edge detection for channels 64 to 95 */
ni_65xx_update_edge_detection(dev, 64, 0, 0);
break;
case INSN_CONFIG_DIGITAL_TRIG:
/* check trigger number */
if (data[1] != 0)
return -EINVAL;
/* check digital trigger operation */
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
ni_65xx_disable_edge_detection(dev);
break;
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
/*
* update edge detection for channels data[3]
* to (data[3] + 31)
*/
ni_65xx_update_edge_detection(dev, data[3],
data[4], data[5]);
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
return insn->n;
}
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
#define WENAB (1 << 7) /* window enable */
static int ni_65xx_mite_init(struct pci_dev *pcidev)
{
void __iomem *mite_base;
u32 main_phys_addr;
/* ioremap the MITE registers (BAR 0) temporarily */
mite_base = pci_ioremap_bar(pcidev, 0);
if (!mite_base)
return -ENOMEM;
/* set data window to main registers (BAR 1) */
main_phys_addr = pci_resource_start(pcidev, 1);
writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
/* finished with MITE registers */
iounmap(mite_base);
return 0;
}
static int ni_65xx_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_65xx_board *board = NULL;
struct comedi_subdevice *s;
unsigned i;
int ret;
if (context < ARRAY_SIZE(ni_65xx_boards))
board = &ni_65xx_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
ret = ni_65xx_mite_init(pcidev);
if (ret)
return ret;
dev->mmio = pci_ioremap_bar(pcidev, 1);
if (!dev->mmio)
return -ENOMEM;
writeb(NI_65XX_CLR_EDGE_INT | NI_65XX_CLR_OVERFLOW_INT,
dev->mmio + NI_65XX_CLR_REG);
writeb(0x00, dev->mmio + NI_65XX_CTRL_REG);
if (pcidev->irq) {
ret = request_irq(pcidev->irq, ni_65xx_interrupt, IRQF_SHARED,
dev->board_name, dev);
if (ret == 0)
dev->irq = pcidev->irq;
}
dev_info(dev->class_dev, "board: %s, ID=0x%02x", dev->board_name,
readb(dev->mmio + NI_65XX_ID_REG));
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
s = &dev->subdevices[0];
if (board->num_di_ports) {
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = NI_65XX_PORT_TO_CHAN(board->num_di_ports);
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = ni_65xx_dio_insn_bits;
s->insn_config = ni_65xx_dio_insn_config;
/* the input ports always start at port 0 */
s->private = (void *)0;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
s = &dev->subdevices[1];
if (board->num_do_ports) {
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = NI_65XX_PORT_TO_CHAN(board->num_do_ports);
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = ni_65xx_dio_insn_bits;
/* the output ports always start after the input ports */
s->private = (void *)(unsigned long)board->num_di_ports;
/*
* Use the io_bits to handle the inverted outputs. Inverted
* outputs are only supported if the "legacy_invert_outputs"
* module parameter is set to "true".
*/
if (ni_65xx_legacy_invert_outputs && board->legacy_invert)
s->io_bits = 0xff;
/* reset all output ports to comedi '0' */
for (i = 0; i < board->num_do_ports; ++i) {
writeb(s->io_bits, /* inverted if necessary */
dev->mmio +
NI_65XX_IO_DATA_REG(board->num_di_ports + i));
}
} else {
s->type = COMEDI_SUBD_UNUSED;
}
s = &dev->subdevices[2];
if (board->num_dio_ports) {
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = NI_65XX_PORT_TO_CHAN(board->num_dio_ports);
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = ni_65xx_dio_insn_bits;
s->insn_config = ni_65xx_dio_insn_config;
/* the input/output ports always start at port 0 */
s->private = (void *)0;
/* configure all ports for input */
for (i = 0; i < board->num_dio_ports; ++i) {
writeb(NI_65XX_IO_SEL_INPUT,
dev->mmio + NI_65XX_IO_SEL_REG(i));
}
} else {
s->type = COMEDI_SUBD_UNUSED;
}
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = ni_65xx_intr_insn_bits;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = 1;
s->insn_config = ni_65xx_intr_insn_config;
s->do_cmdtest = ni_65xx_intr_cmdtest;
s->do_cmd = ni_65xx_intr_cmd;
s->cancel = ni_65xx_intr_cancel;
}
ni_65xx_disable_input_filters(dev);
ni_65xx_disable_edge_detection(dev);
return 0;
}
static void ni_65xx_detach(struct comedi_device *dev)
{
if (dev->mmio)
writeb(0x00, dev->mmio + NI_65XX_CTRL_REG);
comedi_pci_detach(dev);
}
static struct comedi_driver ni_65xx_driver = {
.driver_name = "ni_65xx",
.module = THIS_MODULE,
.auto_attach = ni_65xx_auto_attach,
.detach = ni_65xx_detach,
};
static int ni_65xx_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &ni_65xx_driver, id->driver_data);
}
static const struct pci_device_id ni_65xx_pci_table[] = {
{ PCI_VDEVICE(NI, 0x1710), BOARD_PXI6509 },
{ PCI_VDEVICE(NI, 0x7085), BOARD_PCI6509 },
{ PCI_VDEVICE(NI, 0x7086), BOARD_PXI6528 },
{ PCI_VDEVICE(NI, 0x7087), BOARD_PCI6515 },
{ PCI_VDEVICE(NI, 0x7088), BOARD_PCI6514 },
{ PCI_VDEVICE(NI, 0x70a9), BOARD_PCI6528 },
{ PCI_VDEVICE(NI, 0x70c3), BOARD_PCI6511 },
{ PCI_VDEVICE(NI, 0x70c8), BOARD_PCI6513 },
{ PCI_VDEVICE(NI, 0x70c9), BOARD_PXI6515 },
{ PCI_VDEVICE(NI, 0x70cc), BOARD_PCI6512 },
{ PCI_VDEVICE(NI, 0x70cd), BOARD_PXI6514 },
{ PCI_VDEVICE(NI, 0x70d1), BOARD_PXI6513 },
{ PCI_VDEVICE(NI, 0x70d2), BOARD_PXI6512 },
{ PCI_VDEVICE(NI, 0x70d3), BOARD_PXI6511 },
{ PCI_VDEVICE(NI, 0x7124), BOARD_PCI6510 },
{ PCI_VDEVICE(NI, 0x7125), BOARD_PCI6516 },
{ PCI_VDEVICE(NI, 0x7126), BOARD_PCI6517 },
{ PCI_VDEVICE(NI, 0x7127), BOARD_PCI6518 },
{ PCI_VDEVICE(NI, 0x7128), BOARD_PCI6519 },
{ PCI_VDEVICE(NI, 0x718b), BOARD_PCI6521 },
{ PCI_VDEVICE(NI, 0x718c), BOARD_PXI6521 },
{ PCI_VDEVICE(NI, 0x71c5), BOARD_PCI6520 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ni_65xx_pci_table);
static struct pci_driver ni_65xx_pci_driver = {
.name = "ni_65xx",
.id_table = ni_65xx_pci_table,
.probe = ni_65xx_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_65xx_driver, ni_65xx_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for NI PCI-65xx static dio boards");
MODULE_LICENSE("GPL");
| gpl-2.0 |
3EleVen/kernel_common | arch/arm/mm/cache-l2x0.c | 331 | 15909 | /*
* arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
*
* Copyright (C) 2007 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
static u32 l2x0_cache_id;
static unsigned int l2x0_sets;
static unsigned int l2x0_ways;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
static inline bool is_pl310_rev(int rev)
{
return (l2x0_cache_id &
(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
(L2X0_CACHE_ID_PART_L310 | rev);
}
struct l2x0_regs l2x0_saved_regs;
struct l2x0_of_data {
void (*setup)(const struct device_node *, u32 *, u32 *);
void (*save)(void);
void (*resume)(void);
};
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait cache_wait_way
#endif
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
writel_relaxed(0, base + sync_reg_offset);
cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_clean_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
cache_wait(base + L2X0_INV_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
if (outer_cache.set_debug)
outer_cache.set_debug(val);
}
static void pl310_set_debug(unsigned long val)
{
writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#define pl310_set_debug NULL
#endif
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
/* Clean by PA followed by Invalidate by PA */
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
cache_wait(base + L2X0_INV_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
static void l2x0_cache_sync(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
cache_sync();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
#ifdef CONFIG_PL310_ERRATA_727915
static void l2x0_for_each_set_way(void __iomem *reg)
{
int set;
int way;
unsigned long flags;
for (way = 0; way < l2x0_ways; way++) {
raw_spin_lock_irqsave(&l2x0_lock, flags);
for (set = 0; set < l2x0_sets; set++)
writel_relaxed((way << 28) | (set << 5), reg);
cache_sync();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
}
#endif
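/*
* Sketch of the index/way encoding used above (assuming a PL310 with
* 32-byte lines): the way number occupies bits [31:28] and the set index
* starts at bit 5, so e.g. way 2, set 10 is written as
* (2 << 28) | (10 << 5) = 0x20000140.
*/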
static void __l2x0_flush_all(void)
{
debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
cache_sync();
debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
unsigned long flags;
#ifdef CONFIG_PL310_ERRATA_727915
if (is_pl310_rev(REV_PL310_R2P0)) {
l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
return;
}
#endif
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
unsigned long flags;
#ifdef CONFIG_PL310_ERRATA_727915
if (is_pl310_rev(REV_PL310_R2P0)) {
l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
return;
}
#endif
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
cache_sync();
debug_writel(0x00);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
unsigned long flags;
/* invalidate all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
/* Invalidating while the L2 is enabled is not allowed */
BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
debug_writel(0x03);
l2x0_flush_line(start);
debug_writel(0x00);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
debug_writel(0x03);
l2x0_flush_line(end);
debug_writel(0x00);
}
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
l2x0_inv_line(start);
start += CACHE_LINE_SIZE;
}
if (blk_end < end) {
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
if ((end - start) >= l2x0_size) {
l2x0_clean_all();
return;
}
raw_spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
l2x0_clean_line(start);
start += CACHE_LINE_SIZE;
}
if (blk_end < end) {
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
if ((end - start) >= l2x0_size) {
l2x0_flush_all();
return;
}
raw_spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
debug_writel(0x03);
while (start < blk_end) {
l2x0_flush_line(start);
start += CACHE_LINE_SIZE;
}
debug_writel(0x00);
if (blk_end < end) {
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
writel_relaxed(0, l2x0_base + L2X0_CTRL);
dsb();
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_unlock(u32 cache_id)
{
int lockregs;
int i;
if (cache_id == L2X0_CACHE_ID_PART_L310)
lockregs = 8;
else
/* L210 and unknown types */
lockregs = 1;
for (i = 0; i < lockregs; i++) {
writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
i * L2X0_LOCKDOWN_STRIDE);
writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
i * L2X0_LOCKDOWN_STRIDE);
}
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
u32 aux;
u32 way_size = 0;
const char *type;
l2x0_base = base;
l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
l2x0_ways = 16;
else
l2x0_ways = 8;
type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
/* Unmapped register. */
sync_reg_offset = L2X0_DUMMY_REG;
#endif
outer_cache.set_debug = pl310_set_debug;
break;
case L2X0_CACHE_ID_PART_L210:
l2x0_ways = (aux >> 13) & 0xf;
type = "L210";
break;
default:
/* Assume unknown chips have 8 ways */
l2x0_ways = 8;
type = "L2x0 series";
break;
}
l2x0_way_mask = (1 << l2x0_ways) - 1;
/*
* L2 cache Size = Way size * Number of ways
*/
way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
way_size = SZ_1K << (way_size + 3);
l2x0_size = l2x0_ways * way_size;
l2x0_sets = way_size / CACHE_LINE_SIZE;
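/*
* Worked example with hypothetical values: a way-size field of 2 gives
* way_size = SZ_1K << (2 + 3) = 32 KiB; on a 16-way L310 this yields
* l2x0_size = 512 KiB and l2x0_sets = 32768 / 32 = 1024.
*/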
/*
* Check if l2x0 controller is already enabled.
* If booting from non-secure mode,
* accessing the registers below will fault.
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
/* Make sure that I&D is not locked down when starting */
l2x0_unlock(l2x0_cache_id);
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
l2x0_saved_regs.aux_ctrl = aux;
l2x0_inv_all();
/* enable L2X0 */
writel_relaxed(1, l2x0_base + L2X0_CTRL);
}
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
outer_cache.flush_all = l2x0_flush_all;
outer_cache.inv_all = l2x0_inv_all;
outer_cache.disable = l2x0_disable;
printk(KERN_INFO "%s cache controller enabled\n", type);
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 data[2] = { 0, 0 };
u32 tag = 0;
u32 dirty = 0;
u32 val = 0, mask = 0;
of_property_read_u32(np, "arm,tag-latency", &tag);
if (tag) {
mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
}
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1]) {
mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
}
of_property_read_u32(np, "arm,dirty-latency", &dirty);
if (dirty) {
mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
}
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
}
static void __init pl310_of_setup(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 data[3] = { 0, 0, 0 };
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
writel_relaxed(
((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
l2x0_base + L2X0_TAG_LATENCY_CTRL);
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1] && data[2])
writel_relaxed(
((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
l2x0_base + L2X0_DATA_LATENCY_CTRL);
of_property_read_u32_array(np, "arm,filter-ranges",
filter, ARRAY_SIZE(filter));
if (filter[1]) {
writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
l2x0_base + L2X0_ADDR_FILTER_END);
writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
l2x0_base + L2X0_ADDR_FILTER_START);
}
}
static void __init pl310_save(void)
{
u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
L2X0_TAG_LATENCY_CTRL);
l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
L2X0_DATA_LATENCY_CTRL);
l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
L2X0_ADDR_FILTER_END);
l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
L2X0_ADDR_FILTER_START);
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
/*
* From r2p0, there is Prefetch offset/control register
*/
l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
L2X0_PREFETCH_CTRL);
/*
* From r3p0, there is Power control register
*/
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
L2X0_POWER_CTRL);
}
}
static void l2x0_resume(void)
{
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
/* restore aux ctrl and enable l2 */
l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
L2X0_AUX_CTRL);
l2x0_inv_all();
writel_relaxed(1, l2x0_base + L2X0_CTRL);
}
}
static void pl310_resume(void)
{
u32 l2x0_revision;
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
/* restore pl310 setup */
writel_relaxed(l2x0_saved_regs.tag_latency,
l2x0_base + L2X0_TAG_LATENCY_CTRL);
writel_relaxed(l2x0_saved_regs.data_latency,
l2x0_base + L2X0_DATA_LATENCY_CTRL);
writel_relaxed(l2x0_saved_regs.filter_end,
l2x0_base + L2X0_ADDR_FILTER_END);
writel_relaxed(l2x0_saved_regs.filter_start,
l2x0_base + L2X0_ADDR_FILTER_START);
l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
l2x0_base + L2X0_PREFETCH_CTRL);
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
writel_relaxed(l2x0_saved_regs.pwr_ctrl,
l2x0_base + L2X0_POWER_CTRL);
}
}
l2x0_resume();
}
static const struct l2x0_of_data pl310_data = {
pl310_of_setup,
pl310_save,
pl310_resume,
};
static const struct l2x0_of_data l2x0_data = {
l2x0_of_setup,
NULL,
l2x0_resume,
};
static const struct of_device_id l2x0_ids[] __initconst = {
{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
{}
};
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
struct device_node *np;
struct l2x0_of_data *data;
struct resource res;
np = of_find_matching_node(NULL, l2x0_ids);
if (!np)
return -ENODEV;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
l2x0_base = ioremap(res.start, resource_size(&res));
if (!l2x0_base)
return -ENOMEM;
l2x0_saved_regs.phy_base = res.start;
data = of_match_node(l2x0_ids, np)->data;
/* L2 configuration can only be changed if the cache is disabled */
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
if (data->setup)
data->setup(np, &aux_val, &aux_mask);
}
if (data->save)
data->save();
l2x0_init(l2x0_base, aux_val, aux_mask);
outer_cache.resume = data->resume;
return 0;
}
#endif
| gpl-2.0 |
palmer-dabbelt/linux | drivers/tty/serial/serial_ks8695.c | 587 | 16006 | /*
* Driver for KS8695 serial ports
*
* Based on drivers/serial/serial_amba.c, by Kam Lee.
*
* Copyright 2002-2005 Micrel Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/regs-uart.h>
#include <mach/regs-irq.h>
#if defined(CONFIG_SERIAL_KS8695_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
#define SERIAL_KS8695_MAJOR 204
#define SERIAL_KS8695_MINOR 16
#define SERIAL_KS8695_DEVNAME "ttyAM"
#define SERIAL_KS8695_NR 1
/*
* Access macros for the KS8695 UART
*/
#define UART_GET_CHAR(p) (__raw_readl((p)->membase + KS8695_URRB) & 0xFF)
#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + KS8695_URTH)
#define UART_GET_FCR(p) __raw_readl((p)->membase + KS8695_URFC)
#define UART_PUT_FCR(p, c) __raw_writel((c), (p)->membase + KS8695_URFC)
#define UART_GET_MSR(p) __raw_readl((p)->membase + KS8695_URMS)
#define UART_GET_LSR(p) __raw_readl((p)->membase + KS8695_URLS)
#define UART_GET_LCR(p) __raw_readl((p)->membase + KS8695_URLC)
#define UART_PUT_LCR(p, c) __raw_writel((c), (p)->membase + KS8695_URLC)
#define UART_GET_MCR(p) __raw_readl((p)->membase + KS8695_URMC)
#define UART_PUT_MCR(p, c) __raw_writel((c), (p)->membase + KS8695_URMC)
#define UART_GET_BRDR(p) __raw_readl((p)->membase + KS8695_URBD)
#define UART_PUT_BRDR(p, c) __raw_writel((c), (p)->membase + KS8695_URBD)
#define KS8695_CLR_TX_INT() __raw_writel(1 << KS8695_IRQ_UART_TX, KS8695_IRQ_VA + KS8695_INTST)
#define UART_DUMMY_LSR_RX 0x100
#define UART_PORT_SIZE (KS8695_USR - KS8695_URRB + 4)
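/*
* For reference (mirroring how these macros are used further down): polling
* for "transmit holding register empty" is UART_GET_LSR(port) & URLS_URTHRE,
* and a character is sent with UART_PUT_CHAR(port, ch), as in the console
* putchar helper below.
*/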
static inline int tx_enabled(struct uart_port *port)
{
return port->unused[0] & 1;
}
static inline int rx_enabled(struct uart_port *port)
{
return port->unused[0] & 2;
}
static inline int ms_enabled(struct uart_port *port)
{
return port->unused[0] & 4;
}
static inline void ms_enable(struct uart_port *port, int enabled)
{
if (enabled)
port->unused[0] |= 4;
else
port->unused[0] &= ~4;
}
static inline void rx_enable(struct uart_port *port, int enabled)
{
if (enabled)
port->unused[0] |= 2;
else
port->unused[0] &= ~2;
}
static inline void tx_enable(struct uart_port *port, int enabled)
{
if (enabled)
port->unused[0] |= 1;
else
port->unused[0] &= ~1;
}
#ifdef SUPPORT_SYSRQ
static struct console ks8695_console;
#endif
static void ks8695uart_stop_tx(struct uart_port *port)
{
if (tx_enabled(port)) {
/* use disable_irq_nosync() and not disable_irq() to avoid self
* imposed deadlock by not waiting for irq handler to end,
* since this ks8695uart_stop_tx() is called from interrupt context.
*/
disable_irq_nosync(KS8695_IRQ_UART_TX);
tx_enable(port, 0);
}
}
static void ks8695uart_start_tx(struct uart_port *port)
{
if (!tx_enabled(port)) {
enable_irq(KS8695_IRQ_UART_TX);
tx_enable(port, 1);
}
}
static void ks8695uart_stop_rx(struct uart_port *port)
{
if (rx_enabled(port)) {
disable_irq(KS8695_IRQ_UART_RX);
rx_enable(port, 0);
}
}
static void ks8695uart_enable_ms(struct uart_port *port)
{
if (!ms_enabled(port)) {
enable_irq(KS8695_IRQ_UART_MODEM_STATUS);
ms_enable(port, 1);
}
}
static void ks8695uart_disable_ms(struct uart_port *port)
{
if (ms_enabled(port)) {
disable_irq(KS8695_IRQ_UART_MODEM_STATUS);
ms_enable(port, 0);
}
}
static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned int status, ch, lsr, flg, max_count = 256;
status = UART_GET_LSR(port); /* clears pending LSR interrupts */
while ((status & URLS_URDR) && max_count--) {
ch = UART_GET_CHAR(port);
flg = TTY_NORMAL;
port->icount.rx++;
/*
* Note that the error handling code is
* out of the main execution path
*/
lsr = UART_GET_LSR(port) | UART_DUMMY_LSR_RX;
if (unlikely(lsr & (URLS_URBI | URLS_URPE | URLS_URFE | URLS_URROE))) {
if (lsr & URLS_URBI) {
lsr &= ~(URLS_URFE | URLS_URPE);
port->icount.brk++;
if (uart_handle_break(port))
goto ignore_char;
}
if (lsr & URLS_URPE)
port->icount.parity++;
if (lsr & URLS_URFE)
port->icount.frame++;
if (lsr & URLS_URROE)
port->icount.overrun++;
lsr &= port->read_status_mask;
if (lsr & URLS_URBI)
flg = TTY_BREAK;
else if (lsr & URLS_URPE)
flg = TTY_PARITY;
else if (lsr & URLS_URFE)
flg = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, ch))
goto ignore_char;
uart_insert_char(port, lsr, URLS_URROE, ch, flg);
ignore_char:
status = UART_GET_LSR(port);
}
tty_flip_buffer_push(&port->state->port);
return IRQ_HANDLED;
}
static irqreturn_t ks8695uart_tx_chars(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct circ_buf *xmit = &port->state->xmit;
unsigned int count;
if (port->x_char) {
KS8695_CLR_TX_INT();
UART_PUT_CHAR(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
return IRQ_HANDLED;
}
if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
ks8695uart_stop_tx(port);
return IRQ_HANDLED;
}
count = 16; /* fifo size */
while (!uart_circ_empty(xmit) && (count-- > 0)) {
KS8695_CLR_TX_INT();
UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
ks8695uart_stop_tx(port);
return IRQ_HANDLED;
}
static irqreturn_t ks8695uart_modem_status(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned int status;
/*
* clear modem interrupt by reading MSR
*/
status = UART_GET_MSR(port);
if (status & URMS_URDDCD)
uart_handle_dcd_change(port, status & URMS_URDDCD);
if (status & URMS_URDDST)
port->icount.dsr++;
if (status & URMS_URDCTS)
uart_handle_cts_change(port, status & URMS_URDCTS);
if (status & URMS_URTERI)
port->icount.rng++;
wake_up_interruptible(&port->state->port.delta_msr_wait);
return IRQ_HANDLED;
}
static unsigned int ks8695uart_tx_empty(struct uart_port *port)
{
return (UART_GET_LSR(port) & URLS_URTE) ? TIOCSER_TEMT : 0;
}
static unsigned int ks8695uart_get_mctrl(struct uart_port *port)
{
unsigned int result = 0;
unsigned int status;
status = UART_GET_MSR(port);
if (status & URMS_URDCD)
result |= TIOCM_CAR;
if (status & URMS_URDSR)
result |= TIOCM_DSR;
if (status & URMS_URCTS)
result |= TIOCM_CTS;
if (status & URMS_URRI)
result |= TIOCM_RI;
return result;
}
static void ks8695uart_set_mctrl(struct uart_port *port, u_int mctrl)
{
unsigned int mcr;
mcr = UART_GET_MCR(port);
if (mctrl & TIOCM_RTS)
mcr |= URMC_URRTS;
else
mcr &= ~URMC_URRTS;
if (mctrl & TIOCM_DTR)
mcr |= URMC_URDTR;
else
mcr &= ~URMC_URDTR;
UART_PUT_MCR(port, mcr);
}
static void ks8695uart_break_ctl(struct uart_port *port, int break_state)
{
unsigned int lcr;
lcr = UART_GET_LCR(port);
if (break_state == -1)
lcr |= URLC_URSBC;
else
lcr &= ~URLC_URSBC;
UART_PUT_LCR(port, lcr);
}
static int ks8695uart_startup(struct uart_port *port)
{
int retval;
irq_modify_status(KS8695_IRQ_UART_TX, IRQ_NOREQUEST, IRQ_NOAUTOEN);
tx_enable(port, 0);
rx_enable(port, 1);
ms_enable(port, 1);
/*
* Allocate the IRQ
*/
retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, 0, "UART TX", port);
if (retval)
goto err_tx;
retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, 0, "UART RX", port);
if (retval)
goto err_rx;
retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, 0, "UART LineStatus", port);
if (retval)
goto err_ls;
retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, 0, "UART ModemStatus", port);
if (retval)
goto err_ms;
return 0;
err_ms:
free_irq(KS8695_IRQ_UART_LINE_STATUS, port);
err_ls:
free_irq(KS8695_IRQ_UART_RX, port);
err_rx:
free_irq(KS8695_IRQ_UART_TX, port);
err_tx:
return retval;
}
static void ks8695uart_shutdown(struct uart_port *port)
{
/*
* Free the interrupt
*/
free_irq(KS8695_IRQ_UART_RX, port);
free_irq(KS8695_IRQ_UART_TX, port);
free_irq(KS8695_IRQ_UART_MODEM_STATUS, port);
free_irq(KS8695_IRQ_UART_LINE_STATUS, port);
/* disable break condition and fifos */
UART_PUT_LCR(port, UART_GET_LCR(port) & ~URLC_URSBC);
UART_PUT_FCR(port, UART_GET_FCR(port) & ~URFC_URFE);
}
static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old)
{
unsigned int lcr, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
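/*
* A rough sketch of the arithmetic (numbers are illustrative only): the
* divisor returned here is essentially uartclk / (16 * baud), so a
* 1.8432 MHz uartclk at 115200 baud would yield quot = 1, which is then
* written to the baud rate divisor register below.
*/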
switch (termios->c_cflag & CSIZE) {
case CS5:
lcr = URCL_5;
break;
case CS6:
lcr = URCL_6;
break;
case CS7:
lcr = URCL_7;
break;
default:
lcr = URCL_8;
break;
}
/* stop bits */
if (termios->c_cflag & CSTOPB)
lcr |= URLC_URSB;
/* parity */
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & CMSPAR) { /* Mark or Space parity */
if (termios->c_cflag & PARODD)
lcr |= URPE_MARK;
else
lcr |= URPE_SPACE;
}
else if (termios->c_cflag & PARODD)
lcr |= URPE_ODD;
else
lcr |= URPE_EVEN;
}
if (port->fifosize > 1)
fcr = URFC_URFRT_8 | URFC_URTFR | URFC_URRFR | URFC_URFE;
spin_lock_irqsave(&port->lock, flags);
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = URLS_URROE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (URLS_URFE | URLS_URPE);
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= URLS_URBI;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= (URLS_URFE | URLS_URPE);
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= URLS_URBI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= URLS_URROE;
}
/*
* Ignore all characters if CREAD is not set.
*/
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= UART_DUMMY_LSR_RX;
/* first, disable everything */
if (UART_ENABLE_MS(port, termios->c_cflag))
ks8695uart_enable_ms(port);
else
ks8695uart_disable_ms(port);
/* Set baud rate */
UART_PUT_BRDR(port, quot);
UART_PUT_LCR(port, lcr);
UART_PUT_FCR(port, fcr);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *ks8695uart_type(struct uart_port *port)
{
return port->type == PORT_KS8695 ? "KS8695" : NULL;
}
/*
* Release the memory region(s) being used by 'port'
*/
static void ks8695uart_release_port(struct uart_port *port)
{
release_mem_region(port->mapbase, UART_PORT_SIZE);
}
/*
* Request the memory region(s) being used by 'port'
*/
static int ks8695uart_request_port(struct uart_port *port)
{
return request_mem_region(port->mapbase, UART_PORT_SIZE,
"serial_ks8695") != NULL ? 0 : -EBUSY;
}
/*
* Configure/autoconfigure the port.
*/
static void ks8695uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_KS8695;
ks8695uart_request_port(port);
}
}
/*
* verify the new serial_struct (for TIOCSSERIAL).
*/
static int ks8695uart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_KS8695)
ret = -EINVAL;
if (ser->irq != port->irq)
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;
return ret;
}
static struct uart_ops ks8695uart_pops = {
.tx_empty = ks8695uart_tx_empty,
.set_mctrl = ks8695uart_set_mctrl,
.get_mctrl = ks8695uart_get_mctrl,
.stop_tx = ks8695uart_stop_tx,
.start_tx = ks8695uart_start_tx,
.stop_rx = ks8695uart_stop_rx,
.enable_ms = ks8695uart_enable_ms,
.break_ctl = ks8695uart_break_ctl,
.startup = ks8695uart_startup,
.shutdown = ks8695uart_shutdown,
.set_termios = ks8695uart_set_termios,
.type = ks8695uart_type,
.release_port = ks8695uart_release_port,
.request_port = ks8695uart_request_port,
.config_port = ks8695uart_config_port,
.verify_port = ks8695uart_verify_port,
};
static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = {
{
.membase = KS8695_UART_VA,
.mapbase = KS8695_UART_PA,
.iotype = SERIAL_IO_MEM,
.irq = KS8695_IRQ_UART_TX,
.uartclk = KS8695_CLOCK_RATE * 16,
.fifosize = 16,
.ops = &ks8695uart_pops,
.flags = ASYNC_BOOT_AUTOCONF,
.line = 0,
}
};
#ifdef CONFIG_SERIAL_KS8695_CONSOLE
static void ks8695_console_putchar(struct uart_port *port, int ch)
{
while (!(UART_GET_LSR(port) & URLS_URTHRE))
barrier();
UART_PUT_CHAR(port, ch);
}
static void ks8695_console_write(struct console *co, const char *s, u_int count)
{
struct uart_port *port = ks8695uart_ports + co->index;
uart_console_write(port, s, count, ks8695_console_putchar);
}
static void __init ks8695_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits)
{
unsigned int lcr;
lcr = UART_GET_LCR(port);
switch (lcr & URLC_PARITY) {
case URPE_ODD:
*parity = 'o';
break;
case URPE_EVEN:
*parity = 'e';
break;
default:
*parity = 'n';
}
switch (lcr & URLC_URCL) {
case URCL_5:
*bits = 5;
break;
case URCL_6:
*bits = 6;
break;
case URCL_7:
*bits = 7;
break;
default:
*bits = 8;
}
*baud = port->uartclk / (UART_GET_BRDR(port) & 0x0FFF);
*baud /= 16;
*baud &= 0xFFFFFFF0;
}
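/*
* Note on the masking above: the recovered rate is only rounded down to a
* multiple of 16, presumably to absorb divisor rounding error; e.g. a raw
* readback of 115190 would become 115184 rather than exactly 115200.
*/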
static int __init ks8695_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
port = uart_get_console(ks8695uart_ports, SERIAL_KS8695_NR, co);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
ks8695_console_get_options(port, &baud, &parity, &bits);
return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver ks8695_reg;
static struct console ks8695_console = {
.name = SERIAL_KS8695_DEVNAME,
.write = ks8695_console_write,
.device = uart_console_device,
.setup = ks8695_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &ks8695_reg,
};
static int __init ks8695_console_init(void)
{
add_preferred_console(SERIAL_KS8695_DEVNAME, 0, NULL);
register_console(&ks8695_console);
return 0;
}
console_initcall(ks8695_console_init);
#define KS8695_CONSOLE &ks8695_console
#else
#define KS8695_CONSOLE NULL
#endif
static struct uart_driver ks8695_reg = {
.owner = THIS_MODULE,
.driver_name = "serial_ks8695",
.dev_name = SERIAL_KS8695_DEVNAME,
.major = SERIAL_KS8695_MAJOR,
.minor = SERIAL_KS8695_MINOR,
.nr = SERIAL_KS8695_NR,
.cons = KS8695_CONSOLE,
};
static int __init ks8695uart_init(void)
{
int i, ret;
printk(KERN_INFO "Serial: Micrel KS8695 UART driver\n");
ret = uart_register_driver(&ks8695_reg);
if (ret)
return ret;
for (i = 0; i < SERIAL_KS8695_NR; i++)
uart_add_one_port(&ks8695_reg, &ks8695uart_ports[i]);
return 0;
}
static void __exit ks8695uart_exit(void)
{
int i;
for (i = 0; i < SERIAL_KS8695_NR; i++)
uart_remove_one_port(&ks8695_reg, &ks8695uart_ports[i]);
uart_unregister_driver(&ks8695_reg);
}
module_init(ks8695uart_init);
module_exit(ks8695uart_exit);
MODULE_DESCRIPTION("KS8695 serial port driver");
MODULE_AUTHOR("Micrel Inc.");
MODULE_LICENSE("GPL");
| gpl-2.0 |
andr00ib/e730-e739_cm10.1_kernel | fs/logfs/compr.c | 1099 | 1792 | /*
* fs/logfs/compr.c - compression routines
*
* As should be obvious for Linux kernel code, license is GPLv2
*
* Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
*/
#include "logfs.h"
#include <linux/vmalloc.h>
#include <linux/zlib.h>
#define COMPR_LEVEL 3
static DEFINE_MUTEX(compr_mutex);
static struct z_stream_s stream;
int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
{
int err, ret;
ret = -EIO;
mutex_lock(&compr_mutex);
err = zlib_deflateInit(&stream, COMPR_LEVEL);
if (err != Z_OK)
goto error;
stream.next_in = in;
stream.avail_in = inlen;
stream.total_in = 0;
stream.next_out = out;
stream.avail_out = outlen;
stream.total_out = 0;
err = zlib_deflate(&stream, Z_FINISH);
if (err != Z_STREAM_END)
goto error;
err = zlib_deflateEnd(&stream);
if (err != Z_OK)
goto error;
if (stream.total_out >= stream.total_in)
goto error;
ret = stream.total_out;
error:
mutex_unlock(&compr_mutex);
return ret;
}
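/*
* Sketch of typical use (helper names are hypothetical): callers attempt to
* compress a block and fall back to storing the raw data when nothing is
* saved, which is why total_out >= total_in is treated as a failure above.
*
* ret = logfs_compress(buf, cbuf, blocksize, blocksize);
* if (ret >= 0)
* store_compressed(cbuf, ret);
* else
* store_raw(buf, blocksize);
*/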
int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
{
int err, ret;
ret = -EIO;
mutex_lock(&compr_mutex);
err = zlib_inflateInit(&stream);
if (err != Z_OK)
goto error;
stream.next_in = in;
stream.avail_in = inlen;
stream.total_in = 0;
stream.next_out = out;
stream.avail_out = outlen;
stream.total_out = 0;
err = zlib_inflate(&stream, Z_FINISH);
if (err != Z_STREAM_END)
goto error;
err = zlib_inflateEnd(&stream);
if (err != Z_OK)
goto error;
ret = 0;
error:
mutex_unlock(&compr_mutex);
return ret;
}
int __init logfs_compr_init(void)
{
size_t size = max(zlib_deflate_workspacesize(),
zlib_inflate_workspacesize());
stream.workspace = vmalloc(size);
if (!stream.workspace)
return -ENOMEM;
return 0;
}
void logfs_compr_exit(void)
{
vfree(stream.workspace);
}
| gpl-2.0 |
TeamWin/android_kernel_samsung_j7elte | drivers/net/wireless/mwifiex/scan.c | 1611 | 62116 | /*
* Marvell Wireless LAN device driver: scan ioctl and command handling
*
* Copyright (C) 2011, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "11n.h"
#include "cfg80211.h"
/* The maximum number of channels the firmware can scan per command */
#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
/* Memory needed to store a max sized Channel List TLV for a firmware scan */
#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
+ (MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN \
*sizeof(struct mwifiex_chan_scan_param_set)))
/* Memory needed to store supported rate */
#define RATE_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_rates_param_set) \
+ HOSTCMD_SUPPORTED_RATES)
/* Memory needed to store a max number/size WildCard SSID TLV for a firmware
scan */
#define WILDCARD_SSID_TLV_MAX_SIZE \
(MWIFIEX_MAX_SSID_LIST_LENGTH * \
(sizeof(struct mwifiex_ie_types_wildcard_ssid_params) \
+ IEEE80211_MAX_SSID_LEN))
/* Maximum memory needed for a mwifiex_scan_cmd_config with all TLVs at max */
#define MAX_SCAN_CFG_ALLOC (sizeof(struct mwifiex_scan_cmd_config) \
+ sizeof(struct mwifiex_ie_types_num_probes) \
+ sizeof(struct mwifiex_ie_types_htcap) \
+ CHAN_TLV_MAX_SIZE \
+ RATE_TLV_MAX_SIZE \
+ WILDCARD_SSID_TLV_MAX_SIZE)
union mwifiex_scan_cmd_config_tlv {
/* Scan configuration (variable length) */
struct mwifiex_scan_cmd_config config;
/* Max allocated block */
u8 config_alloc_buf[MAX_SCAN_CFG_ALLOC];
};
enum cipher_suite {
CIPHER_SUITE_TKIP,
CIPHER_SUITE_CCMP,
CIPHER_SUITE_MAX
};
static u8 mwifiex_wpa_oui[CIPHER_SUITE_MAX][4] = {
{ 0x00, 0x50, 0xf2, 0x02 }, /* TKIP */
{ 0x00, 0x50, 0xf2, 0x04 }, /* AES */
};
static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = {
{ 0x00, 0x0f, 0xac, 0x02 }, /* TKIP */
{ 0x00, 0x0f, 0xac, 0x04 }, /* AES */
};
/*
* This function parses a given IE for a given OUI.
*
* This is used to parse a WPA/RSN IE to find if it has
* a given oui in PTK.
*/
static u8
mwifiex_search_oui_in_ie(struct ie_body *iebody, u8 *oui)
{
u8 count;
count = iebody->ptk_cnt[0];
/* There could be multiple OUIs for PTK hence
1) Take the length.
2) Check all the OUIs for AES.
3) If one of them is AES then pass success. */
while (count) {
if (!memcmp(iebody->ptk_body, oui, sizeof(iebody->ptk_body)))
return MWIFIEX_OUI_PRESENT;
--count;
if (count)
iebody = (struct ie_body *) ((u8 *) iebody +
sizeof(iebody->ptk_body));
}
pr_debug("info: %s: OUI is not found in PTK\n", __func__);
return MWIFIEX_OUI_NOT_PRESENT;
}
/*
* This function checks if a given OUI is present in a RSN IE.
*
* The function first checks if a RSN IE is present or not in the
* BSS descriptor. It tries to locate the OUI only if such an IE is
* present.
*/
static u8
mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
{
u8 *oui;
struct ie_body *iebody;
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).
ieee_hdr.element_id == WLAN_EID_RSN))) {
iebody = (struct ie_body *)
(((u8 *) bss_desc->bcn_rsn_ie->data) +
RSN_GTK_OUI_OFFSET);
oui = &mwifiex_rsn_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
if (ret)
return ret;
}
return ret;
}
/*
* This function checks if a given OUI is present in a WPA IE.
*
* The function first checks if a WPA IE is present or not in the
* BSS descriptor. It tries to locate the OUI only if such an IE is
* present.
*/
static u8
mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
{
u8 *oui;
struct ie_body *iebody;
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
if (((bss_desc->bcn_wpa_ie) &&
((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
WLAN_EID_VENDOR_SPECIFIC))) {
iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
if (ret)
return ret;
}
return ret;
}
/*
* This function compares two SSIDs and checks if they match.
*/
s32
mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2)
{
if (!ssid1 || !ssid2 || (ssid1->ssid_len != ssid2->ssid_len))
return -1;
return memcmp(ssid1->ssid, ssid2->ssid, ssid1->ssid_len);
}
/*
* This function checks if WAPI is enabled in the driver and if the
* scanned network is compatible with it.
*/
static bool
mwifiex_is_bss_wapi(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wapi_enabled &&
(bss_desc->bcn_wapi_ie &&
((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id ==
WLAN_EID_BSS_AC_ACCESS_DELAY))) {
return true;
}
return false;
}
/*
* This function checks if the driver is configured with no security mode
* and if the scanned network is compatible with it.
*/
static bool
mwifiex_is_bss_no_sec(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
WLAN_EID_VENDOR_SPECIFIC)) &&
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
WLAN_EID_RSN)) &&
!priv->sec_info.encryption_mode && !bss_desc->privacy) {
return true;
}
return false;
}
/*
* This function checks if static WEP is enabled in the driver and if the
* scanned network is compatible with it.
*/
static bool
mwifiex_is_bss_static_wep(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled && bss_desc->privacy) {
return true;
}
return false;
}
/*
* This function checks if WPA is enabled in the driver and if the
* scanned network is compatible with it.
*/
static bool
mwifiex_is_bss_wpa(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
((*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
) {
dev_dbg(priv->adapter->dev, "info: %s: WPA:"
" wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
"EncMode=%#x privacy=%#x\n", __func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id : 0,
(bss_desc->bcn_rsn_ie) ?
(*(bss_desc->bcn_rsn_ie)).
ieee_hdr.element_id : 0,
(priv->sec_info.wep_enabled) ? "e" : "d",
(priv->sec_info.wpa_enabled) ? "e" : "d",
(priv->sec_info.wpa2_enabled) ? "e" : "d",
priv->sec_info.encryption_mode,
bss_desc->privacy);
return true;
}
return false;
}
/*
* This function checks if WPA2 is enabled in the driver and if the
* scanned network is compatible with it.
*/
static bool
mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled &&
!priv->sec_info.wpa_enabled &&
priv->sec_info.wpa2_enabled &&
((bss_desc->bcn_rsn_ie) &&
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) {
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
dev_dbg(priv->adapter->dev, "info: %s: WPA2: "
" wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
"EncMode=%#x privacy=%#x\n", __func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id : 0,
(bss_desc->bcn_rsn_ie) ?
(*(bss_desc->bcn_rsn_ie)).
ieee_hdr.element_id : 0,
(priv->sec_info.wep_enabled) ? "e" : "d",
(priv->sec_info.wpa_enabled) ? "e" : "d",
(priv->sec_info.wpa2_enabled) ? "e" : "d",
priv->sec_info.encryption_mode,
bss_desc->privacy);
return true;
}
return false;
}
/*
* This function checks if ad-hoc AES is enabled in the driver and if the
* scanned network is compatible with it.
*/
static bool
mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
((!bss_desc->bcn_wpa_ie) ||
((*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
!priv->sec_info.encryption_mode && bss_desc->privacy) {
return true;
}
return false;
}
/*
* This function checks if dynamic WEP is enabled in the driver and if the
* scanned network is compatible with it.
*/
static bool
mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
((!bss_desc->bcn_wpa_ie) ||
((*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
priv->sec_info.encryption_mode && bss_desc->privacy) {
dev_dbg(priv->adapter->dev, "info: %s: dynamic "
"WEP: wpa_ie=%#x wpa2_ie=%#x "
"EncMode=%#x privacy=%#x\n",
__func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).
vend_hdr.element_id : 0,
(bss_desc->bcn_rsn_ie) ?
(*(bss_desc->bcn_rsn_ie)).
ieee_hdr.element_id : 0,
priv->sec_info.encryption_mode,
bss_desc->privacy);
return true;
}
return false;
}
/*
* This function checks if a scanned network is compatible with the driver
* settings.
*
* WEP WPA WPA2 ad-hoc encrypt Network
* enabled enabled enabled AES mode Privacy WPA WPA2 Compatible
* 0 0 0 0 NONE 0 0 0 yes No security
* 0 1 0 0 x 1x 1 x yes WPA (disable
* HT if no AES)
* 0 0 1 0 x 1x x 1 yes WPA2 (disable
* HT if no AES)
* 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES
* 1 0 0 0 NONE 1 0 0 yes Static WEP
* (disable HT)
* 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP
*
* Compatibility is not matched while roaming, except for mode.
*/
static s32
mwifiex_is_network_compatible(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc, u32 mode)
{
struct mwifiex_adapter *adapter = priv->adapter;
bss_desc->disable_11n = false;
/* Don't check for compatibility if roaming */
if (priv->media_connected &&
(priv->bss_mode == NL80211_IFTYPE_STATION) &&
(bss_desc->bss_mode == NL80211_IFTYPE_STATION))
return 0;
if (priv->wps.session_enable) {
dev_dbg(adapter->dev,
"info: return success directly in WPS period\n");
return 0;
}
if (mwifiex_is_bss_wapi(priv, bss_desc)) {
dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
return 0;
}
if (bss_desc->bss_mode == mode) {
if (mwifiex_is_bss_no_sec(priv, bss_desc)) {
/* No security */
return 0;
} else if (mwifiex_is_bss_static_wep(priv, bss_desc)) {
/* Static WEP enabled */
dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
bss_desc->disable_11n = true;
return 0;
} else if (mwifiex_is_bss_wpa(priv, bss_desc)) {
/* WPA enabled */
if (((priv->adapter->config_bands & BAND_GN ||
priv->adapter->config_bands & BAND_AN) &&
bss_desc->bcn_ht_cap) &&
!mwifiex_is_wpa_oui_present(bss_desc,
CIPHER_SUITE_CCMP)) {
if (mwifiex_is_wpa_oui_present
(bss_desc, CIPHER_SUITE_TKIP)) {
dev_dbg(adapter->dev,
"info: Disable 11n if AES "
"is not supported by AP\n");
bss_desc->disable_11n = true;
} else {
return -1;
}
}
return 0;
} else if (mwifiex_is_bss_wpa2(priv, bss_desc)) {
/* WPA2 enabled */
if (((priv->adapter->config_bands & BAND_GN ||
priv->adapter->config_bands & BAND_AN) &&
bss_desc->bcn_ht_cap) &&
!mwifiex_is_rsn_oui_present(bss_desc,
CIPHER_SUITE_CCMP)) {
if (mwifiex_is_rsn_oui_present
(bss_desc, CIPHER_SUITE_TKIP)) {
dev_dbg(adapter->dev,
"info: Disable 11n if AES "
"is not supported by AP\n");
bss_desc->disable_11n = true;
} else {
return -1;
}
}
return 0;
} else if (mwifiex_is_bss_adhoc_aes(priv, bss_desc)) {
/* Ad-hoc AES enabled */
return 0;
} else if (mwifiex_is_bss_dynamic_wep(priv, bss_desc)) {
/* Dynamic WEP enabled */
return 0;
}
/* Security doesn't match */
dev_dbg(adapter->dev,
"info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s "
"WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__,
(bss_desc->bcn_wpa_ie) ?
(*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0,
(bss_desc->bcn_rsn_ie) ?
(*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0,
(priv->sec_info.wep_enabled) ? "e" : "d",
(priv->sec_info.wpa_enabled) ? "e" : "d",
(priv->sec_info.wpa2_enabled) ? "e" : "d",
priv->sec_info.encryption_mode, bss_desc->privacy);
return -1;
}
/* Mode doesn't match */
return -1;
}
/*
* This function creates a channel list for the driver to scan, based
* on region/band information.
*
* This routine is used for any scan that is not provided with a
* specific channel list to scan.
*/
static int
mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg
*user_scan_in,
struct mwifiex_chan_scan_param_set
*scan_chan_list,
u8 filtered_scan)
{
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
if (!priv->wdev->wiphy->bands[band])
continue;
sband = priv->wdev->wiphy->bands[band];
for (i = 0; (i < sband->n_channels) ; i++) {
ch = &sband->channels[i];
if (ch->flags & IEEE80211_CHAN_DISABLED)
continue;
scan_chan_list[chan_idx].radio_type = band;
if (user_scan_in &&
user_scan_in->chan_list[0].scan_time)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16((u16) user_scan_in->
chan_list[0].scan_time);
else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->passive_scan_time);
else
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->active_scan_time);
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_PASSIVE_SCAN;
else
scan_chan_list[chan_idx].chan_scan_mode_bitmap
&= ~MWIFIEX_PASSIVE_SCAN;
scan_chan_list[chan_idx].chan_number =
(u32) ch->hw_value;
if (filtered_scan) {
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->specific_scan_time);
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_DISABLE_CHAN_FILT;
}
chan_idx++;
}
}
return chan_idx;
}
/*
* This function constructs and sends multiple scan config commands to
* the firmware.
*
* Previous routines in the code flow have created a scan command configuration
* with any requested TLVs. This function splits the channel TLV into maximum
* channels supported per scan lists and sends the portion of the channel TLV,
* along with the other TLVs, to the firmware.
*/
static int
mwifiex_scan_channel_list(struct mwifiex_private *priv,
u32 max_chan_per_scan, u8 filtered_scan,
struct mwifiex_scan_cmd_config *scan_cfg_out,
struct mwifiex_ie_types_chan_list_param_set
*chan_tlv_out,
struct mwifiex_chan_scan_param_set *scan_chan_list)
{
int ret = 0;
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
u32 tlv_idx;
u32 total_scan_time;
u32 done_early;
if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
dev_dbg(priv->adapter->dev,
"info: Scan: Null detect: %p, %p, %p\n",
scan_cfg_out, chan_tlv_out, scan_chan_list);
return -1;
}
chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
/* Set the temp channel struct pointer to the start of the desired
list */
tmp_chan_list = scan_chan_list;
/* Loop through the desired channel list, sending a new firmware scan
command for each group of max_chan_per_scan channels (or for 1,6,11
individually if configured accordingly) */
while (tmp_chan_list->chan_number) {
tlv_idx = 0;
total_scan_time = 0;
chan_tlv_out->header.len = 0;
start_chan = tmp_chan_list;
done_early = false;
/*
* Construct the Channel TLV for the scan command. Continue to
* insert channel TLVs until:
* - the tlv_idx hits the maximum configured per scan command
* - the next channel to insert is 0 (end of desired channel
* list)
* - done_early is set (controlling individual scanning of
* 1,6,11)
*/
while (tlv_idx < max_chan_per_scan &&
tmp_chan_list->chan_number && !done_early) {
dev_dbg(priv->adapter->dev,
"info: Scan: Chan(%3d), Radio(%d),"
" Mode(%d, %d), Dur(%d)\n",
tmp_chan_list->chan_number,
tmp_chan_list->radio_type,
tmp_chan_list->chan_scan_mode_bitmap
& MWIFIEX_PASSIVE_SCAN,
(tmp_chan_list->chan_scan_mode_bitmap
& MWIFIEX_DISABLE_CHAN_FILT) >> 1,
le16_to_cpu(tmp_chan_list->max_scan_time));
/* Copy the current channel TLV to the command being
prepared */
memcpy(chan_tlv_out->chan_scan_param + tlv_idx,
tmp_chan_list,
sizeof(chan_tlv_out->chan_scan_param));
/* Increment the TLV header length by the size
appended */
le16_add_cpu(&chan_tlv_out->header.len,
sizeof(chan_tlv_out->chan_scan_param));
/*
* The tlv buffer length is set to the number of bytes
* of the between the channel tlv pointer and the start
* of the tlv buffer. This compensates for any TLVs
* that were appended before the channel list.
*/
scan_cfg_out->tlv_buf_len = (u32) ((u8 *) chan_tlv_out -
scan_cfg_out->tlv_buf);
/* Add the size of the channel tlv header and the data
length */
scan_cfg_out->tlv_buf_len +=
(sizeof(chan_tlv_out->header)
+ le16_to_cpu(chan_tlv_out->header.len));
/* Increment the index to the channel tlv we are
constructing */
tlv_idx++;
/* Count the total scan time per command */
total_scan_time +=
le16_to_cpu(tmp_chan_list->max_scan_time);
done_early = false;
/* Stop the loop if the *current* channel is in the
1,6,11 set and we are not filtering on a BSSID
or SSID. */
if (!filtered_scan &&
(tmp_chan_list->chan_number == 1 ||
tmp_chan_list->chan_number == 6 ||
tmp_chan_list->chan_number == 11))
done_early = true;
/* Increment the tmp pointer to the next channel to
be scanned */
tmp_chan_list++;
/* Stop the loop if the *next* channel is in the 1,6,11
set. This will cause it to be the only channel
scanned on the next iteration */
if (!filtered_scan &&
(tmp_chan_list->chan_number == 1 ||
tmp_chan_list->chan_number == 6 ||
tmp_chan_list->chan_number == 11))
done_early = true;
}
/* The total scan time should be less than scan command timeout
value */
if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) {
dev_err(priv->adapter->dev, "total scan time %dms"
" is over limit (%dms), scan skipped\n",
total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME);
ret = -1;
break;
}
priv->adapter->scan_channels = start_chan;
/* Send the scan command to the firmware with the specified
cfg */
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
HostCmd_ACT_GEN_SET, 0,
scan_cfg_out);
if (ret)
break;
}
if (ret)
return -1;
return 0;
}
/*
* This function constructs a scan command configuration structure to use
* in scan commands.
*
* Application layer or other functions can invoke network scanning
* with a scan configuration supplied in a user scan configuration structure.
* This structure is used as the basis of one or many scan command configuration
* commands that are sent to the command processing module and eventually to the
* firmware.
*
* This function creates a scan command configuration structure based on the
* following user supplied parameters (if present):
* - SSID filter
* - BSSID filter
* - Number of Probes to be sent
* - Channel list
*
* If the SSID or BSSID filter is not present, the filter is disabled/cleared.
* If the number of probes is not set, adapter default setting is used.
*/
static void
mwifiex_config_scan(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg *user_scan_in,
struct mwifiex_scan_cmd_config *scan_cfg_out,
struct mwifiex_ie_types_chan_list_param_set **chan_list_out,
struct mwifiex_chan_scan_param_set *scan_chan_list,
u8 *max_chan_per_scan, u8 *filtered_scan,
u8 *scan_current_only)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
struct mwifiex_ie_types_rates_param_set *rates_tlv;
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
u32 chan_idx;
u32 chan_num;
u32 scan_type;
u16 scan_dur;
u8 channel;
u8 radio_type;
int i;
u8 ssid_filter;
u8 rates[MWIFIEX_SUPPORTED_RATES];
u32 rates_size;
struct mwifiex_ie_types_htcap *ht_cap;
/* The tlv_buf_len is calculated for each scan command. The TLVs added
in this routine will be preserved since the routine that sends the
command will append channel TLVs at *chan_list_out. The difference
between the *chan_list_out and the tlv_buf start will be used to
calculate the size of anything we add in this routine. */
scan_cfg_out->tlv_buf_len = 0;
/* Running tlv pointer. Assigned to chan_list_out at end of function
so later routines know where channels can be added to the command
buf */
tlv_pos = scan_cfg_out->tlv_buf;
/* Initialize the scan as un-filtered; the flag is later set to TRUE
below if an SSID or BSSID filter is sent in the command */
*filtered_scan = false;
/* Initialize the scan as not being only on the current channel. If
the channel list is customized, only contains one channel, and is
the active channel, this is set true and data flow is not halted. */
*scan_current_only = false;
if (user_scan_in) {
/* Default the ssid_filter flag to TRUE; it is set to false under
certain wildcard conditions and must be qualified by the existence
of an SSID list before the scan is marked as filtered */
ssid_filter = true;
/* Set the BSS type scan filter, use Adapter setting if
unset */
scan_cfg_out->bss_mode =
(user_scan_in->bss_mode ? (u8) user_scan_in->
bss_mode : (u8) adapter->scan_mode);
/* Set the number of probes to send, use Adapter setting
if unset */
num_probes =
(user_scan_in->num_probes ? user_scan_in->
num_probes : adapter->scan_probes);
/*
* Set the BSSID filter to the incoming configuration,
* if non-zero. If not set, it will remain disabled
* (all zeros).
*/
memcpy(scan_cfg_out->specific_bssid,
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
for (i = 0; i < user_scan_in->num_ssids; i++) {
ssid_len = user_scan_in->ssid_list[i].ssid_len;
wildcard_ssid_tlv =
(struct mwifiex_ie_types_wildcard_ssid_params *)
tlv_pos;
wildcard_ssid_tlv->header.type =
cpu_to_le16(TLV_TYPE_WILDCARDSSID);
wildcard_ssid_tlv->header.len = cpu_to_le16(
(u16) (ssid_len + sizeof(wildcard_ssid_tlv->
max_ssid_length)));
/*
* max_ssid_length = 0 tells firmware to perform
* specific scan for the SSID filled, whereas
* max_ssid_length = IEEE80211_MAX_SSID_LEN is for
* wildcard scan.
*/
if (ssid_len)
wildcard_ssid_tlv->max_ssid_length = 0;
else
wildcard_ssid_tlv->max_ssid_length =
IEEE80211_MAX_SSID_LEN;
memcpy(wildcard_ssid_tlv->ssid,
user_scan_in->ssid_list[i].ssid, ssid_len);
tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+ le16_to_cpu(wildcard_ssid_tlv->header.len));
dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
i, wildcard_ssid_tlv->ssid,
wildcard_ssid_tlv->max_ssid_length);
/* Empty wildcard ssid with a maxlen will match many or
potentially all SSIDs (maxlen == 32), therefore do
not treat the scan as
filtered. */
if (!ssid_len && wildcard_ssid_tlv->max_ssid_length)
ssid_filter = false;
}
/*
* The default number of channels sent in the command is low to
* ensure the response buffer from the firmware does not
* truncate scan results. That is not an issue with an SSID
* or BSSID filter applied to the scan results in the firmware.
*/
if ((i && ssid_filter) ||
!is_zero_ether_addr(scan_cfg_out->specific_bssid))
*filtered_scan = true;
} else {
scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
num_probes = adapter->scan_probes;
}
/*
* If a specific BSSID or SSID is used, the number of channels in the
* scan command will be increased to the absolute maximum.
*/
if (*filtered_scan)
*max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
else
*max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
/* If the input config or adapter has the number of Probes set,
add tlv */
if (num_probes) {
dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
num_probes);
num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
num_probes_tlv->header.len =
cpu_to_le16(sizeof(num_probes_tlv->num_probes));
num_probes_tlv->num_probes = cpu_to_le16((u16) num_probes);
tlv_pos += sizeof(num_probes_tlv->header) +
le16_to_cpu(num_probes_tlv->header.len);
}
/* Append rates tlv */
memset(rates, 0, sizeof(rates));
rates_size = mwifiex_get_supported_rates(priv, rates);
rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
rates_tlv->header.len = cpu_to_le16((u16) rates_size);
memcpy(rates_tlv->rates, rates, rates_size);
tlv_pos += sizeof(rates_tlv->header) + rates_size;
dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
(priv->adapter->config_bands & BAND_GN ||
priv->adapter->config_bands & BAND_AN)) {
ht_cap = (struct mwifiex_ie_types_htcap *) tlv_pos;
memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type =
mwifiex_band_to_radio_type(priv->adapter->config_bands);
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
}
/* Append vendor specific IE TLV */
mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_SCAN, &tlv_pos);
/*
* Set the output for the channel TLV to the address in the tlv buffer
* past any TLVs that were added in this function (SSID, num_probes).
* Channel TLVs will be added past this for each scan command,
* preserving the TLVs that were previously added.
*/
*chan_list_out =
(struct mwifiex_ie_types_chan_list_param_set *) tlv_pos;
if (user_scan_in && user_scan_in->chan_list[0].chan_number) {
dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
for (chan_idx = 0;
chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX &&
user_scan_in->chan_list[chan_idx].chan_number;
chan_idx++) {
channel = user_scan_in->chan_list[chan_idx].chan_number;
(scan_chan_list + chan_idx)->chan_number = channel;
radio_type =
user_scan_in->chan_list[chan_idx].radio_type;
(scan_chan_list + chan_idx)->radio_type = radio_type;
scan_type = user_scan_in->chan_list[chan_idx].scan_type;
if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
(scan_chan_list +
chan_idx)->chan_scan_mode_bitmap
|= MWIFIEX_PASSIVE_SCAN;
else
(scan_chan_list +
chan_idx)->chan_scan_mode_bitmap
&= ~MWIFIEX_PASSIVE_SCAN;
if (*filtered_scan)
(scan_chan_list +
chan_idx)->chan_scan_mode_bitmap
|= MWIFIEX_DISABLE_CHAN_FILT;
if (user_scan_in->chan_list[chan_idx].scan_time) {
scan_dur = (u16) user_scan_in->
chan_list[chan_idx].scan_time;
} else {
if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
scan_dur = adapter->passive_scan_time;
else if (*filtered_scan)
scan_dur = adapter->specific_scan_time;
else
scan_dur = adapter->active_scan_time;
}
(scan_chan_list + chan_idx)->min_scan_time =
cpu_to_le16(scan_dur);
(scan_chan_list + chan_idx)->max_scan_time =
cpu_to_le16(scan_dur);
}
/* Check if we are only scanning the current channel */
if ((chan_idx == 1) &&
(user_scan_in->chan_list[0].chan_number ==
priv->curr_bss_params.bss_descriptor.channel)) {
*scan_current_only = true;
dev_dbg(adapter->dev,
"info: Scan: Scanning current channel only\n");
}
chan_num = chan_idx;
} else {
dev_dbg(adapter->dev,
"info: Scan: Creating full region channel list\n");
chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
scan_chan_list,
*filtered_scan);
}
/*
* In the associated state, reduce the number of channels scanned per
* scan command to avoid any traffic delay/loss. This number is decided
* based on the total number of channels to be scanned, due to constraints
* of the command buffers.
*/
if (priv->media_connected) {
if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
*max_chan_per_scan = 1;
else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
*max_chan_per_scan = 2;
else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
*max_chan_per_scan = 3;
else
*max_chan_per_scan = 4;
}
}
/*
* This function inspects the scan response buffer for pointers to
* expected TLVs.
*
* TLVs can be included at the end of the scan response BSS information.
*
* Data in the buffer is parsed for pointers to TLVs that can potentially
* be passed back in the response.
*/
static void
mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
struct mwifiex_ie_types_data *tlv,
u32 tlv_buf_size, u32 req_tlv_type,
struct mwifiex_ie_types_data **tlv_data)
{
struct mwifiex_ie_types_data *current_tlv;
u32 tlv_buf_left;
u32 tlv_type;
u32 tlv_len;
current_tlv = tlv;
tlv_buf_left = tlv_buf_size;
*tlv_data = NULL;
dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
tlv_buf_size);
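/* Walk the TLV chain looking for the requested TLV type; stop at the
first match, or bail out if a TLV header claims more bytes than remain
in the buffer. */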
while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
tlv_type = le16_to_cpu(current_tlv->header.type);
tlv_len = le16_to_cpu(current_tlv->header.len);
if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
break;
}
if (req_tlv_type == tlv_type) {
switch (tlv_type) {
case TLV_TYPE_TSFTIMESTAMP:
dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
"timestamp TLV, len = %d\n", tlv_len);
*tlv_data = current_tlv;
break;
case TLV_TYPE_CHANNELBANDLIST:
dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
" band list TLV, len = %d\n", tlv_len);
*tlv_data = current_tlv;
break;
default:
dev_err(adapter->dev,
"SCAN_RESP: unhandled TLV = %d\n",
tlv_type);
/* Give up, this seems corrupted */
return;
}
}
if (*tlv_data)
break;
tlv_buf_left -= (sizeof(tlv->header) + tlv_len);
current_tlv =
(struct mwifiex_ie_types_data *) (current_tlv->data +
tlv_len);
} /* while */
}
/*
* This function parses provided beacon buffer and updates
* respective fields in bss descriptor structure.
*/
int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
struct mwifiex_bssdescriptor *bss_entry)
{
int ret = 0;
u8 element_id;
struct ieee_types_fh_param_set *fh_param_set;
struct ieee_types_ds_param_set *ds_param_set;
struct ieee_types_cf_param_set *cf_param_set;
struct ieee_types_ibss_param_set *ibss_param_set;
u8 *current_ptr;
u8 *rate;
u8 element_len;
u16 total_ie_len;
u8 bytes_to_copy;
u8 rate_size;
u8 found_data_rate_ie;
u32 bytes_left;
struct ieee_types_vendor_specific *vendor_ie;
const u8 wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };
const u8 wmm_oui[4] = { 0x00, 0x50, 0xf2, 0x02 };
found_data_rate_ie = false;
rate_size = 0;
current_ptr = bss_entry->beacon_buf;
bytes_left = bss_entry->beacon_buf_size;
/* Process variable IE */
while (bytes_left >= 2) {
element_id = *current_ptr;
element_len = *(current_ptr + 1);
total_ie_len = element_len + sizeof(struct ieee_types_header);
if (bytes_left < total_ie_len) {
dev_err(adapter->dev, "err: InterpretIE: in processing"
" IE, bytes left < IE length\n");
return -1;
}
switch (element_id) {
case WLAN_EID_SSID:
bss_entry->ssid.ssid_len = element_len;
memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
element_len);
dev_dbg(adapter->dev,
"info: InterpretIE: ssid: %-32s\n",
bss_entry->ssid.ssid);
break;
case WLAN_EID_SUPP_RATES:
memcpy(bss_entry->data_rates, current_ptr + 2,
element_len);
memcpy(bss_entry->supported_rates, current_ptr + 2,
element_len);
rate_size = element_len;
found_data_rate_ie = true;
break;
case WLAN_EID_FH_PARAMS:
fh_param_set =
(struct ieee_types_fh_param_set *) current_ptr;
memcpy(&bss_entry->phy_param_set.fh_param_set,
fh_param_set,
sizeof(struct ieee_types_fh_param_set));
break;
case WLAN_EID_DS_PARAMS:
ds_param_set =
(struct ieee_types_ds_param_set *) current_ptr;
bss_entry->channel = ds_param_set->current_chan;
memcpy(&bss_entry->phy_param_set.ds_param_set,
ds_param_set,
sizeof(struct ieee_types_ds_param_set));
break;
case WLAN_EID_CF_PARAMS:
cf_param_set =
(struct ieee_types_cf_param_set *) current_ptr;
memcpy(&bss_entry->ss_param_set.cf_param_set,
cf_param_set,
sizeof(struct ieee_types_cf_param_set));
break;
case WLAN_EID_IBSS_PARAMS:
ibss_param_set =
(struct ieee_types_ibss_param_set *)
current_ptr;
memcpy(&bss_entry->ss_param_set.ibss_param_set,
ibss_param_set,
sizeof(struct ieee_types_ibss_param_set));
break;
case WLAN_EID_ERP_INFO:
bss_entry->erp_flags = *(current_ptr + 2);
break;
case WLAN_EID_EXT_SUPP_RATES:
/*
* Only process extended supported rate
* if data rate is already found.
* Data rate IE should come before
* extended supported rate IE
*/
if (found_data_rate_ie) {
if ((element_len + rate_size) >
MWIFIEX_SUPPORTED_RATES)
bytes_to_copy =
(MWIFIEX_SUPPORTED_RATES -
rate_size);
else
bytes_to_copy = element_len;
rate = (u8 *) bss_entry->data_rates;
rate += rate_size;
memcpy(rate, current_ptr + 2, bytes_to_copy);
rate = (u8 *) bss_entry->supported_rates;
rate += rate_size;
memcpy(rate, current_ptr + 2, bytes_to_copy);
}
break;
case WLAN_EID_VENDOR_SPECIFIC:
vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr;
if (!memcmp
(vendor_ie->vend_hdr.oui, wpa_oui,
sizeof(wpa_oui))) {
bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
current_ptr;
bss_entry->wpa_offset = (u16)
(current_ptr - bss_entry->beacon_buf);
} else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) {
if (total_ie_len ==
sizeof(struct ieee_types_wmm_parameter) ||
total_ie_len ==
sizeof(struct ieee_types_wmm_info))
/*
* Only accept and copy the WMM IE if
* it matches the size expected for the
* WMM Info IE or the WMM Parameter IE.
*/
memcpy((u8 *) &bss_entry->wmm_ie,
current_ptr, total_ie_len);
}
break;
case WLAN_EID_RSN:
bss_entry->bcn_rsn_ie =
(struct ieee_types_generic *) current_ptr;
bss_entry->rsn_offset = (u16) (current_ptr -
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_AC_ACCESS_DELAY:
bss_entry->bcn_wapi_ie =
(struct ieee_types_generic *) current_ptr;
bss_entry->wapi_offset = (u16) (current_ptr -
bss_entry->beacon_buf);
break;
case WLAN_EID_HT_CAPABILITY:
bss_entry->bcn_ht_cap = (struct ieee80211_ht_cap *)
(current_ptr +
sizeof(struct ieee_types_header));
bss_entry->ht_cap_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
case WLAN_EID_HT_OPERATION:
bss_entry->bcn_ht_oper =
(struct ieee80211_ht_operation *)(current_ptr +
sizeof(struct ieee_types_header));
bss_entry->ht_info_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
case WLAN_EID_VHT_CAPABILITY:
bss_entry->disable_11ac = false;
bss_entry->bcn_vht_cap =
(void *)(current_ptr +
sizeof(struct ieee_types_header));
bss_entry->vht_cap_offset =
(u16)((u8 *)bss_entry->bcn_vht_cap -
bss_entry->beacon_buf);
break;
case WLAN_EID_VHT_OPERATION:
bss_entry->bcn_vht_oper =
(void *)(current_ptr +
sizeof(struct ieee_types_header));
bss_entry->vht_info_offset =
(u16)((u8 *)bss_entry->bcn_vht_oper -
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_COEX_2040:
bss_entry->bcn_bss_co_2040 = current_ptr +
sizeof(struct ieee_types_header);
bss_entry->bss_co_2040_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
case WLAN_EID_EXT_CAPABILITY:
bss_entry->bcn_ext_cap = current_ptr +
sizeof(struct ieee_types_header);
bss_entry->ext_cap_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
case WLAN_EID_OPMODE_NOTIF:
bss_entry->oper_mode =
(void *)(current_ptr +
sizeof(struct ieee_types_header));
bss_entry->oper_mode_offset =
(u16)((u8 *)bss_entry->oper_mode -
bss_entry->beacon_buf);
break;
default:
break;
}
current_ptr += element_len + 2;
/* Need to account for IE ID and IE Len */
bytes_left -= (element_len + 2);
} /* while (bytes_left >= 2) */
return ret;
}
/*
* This function converts radio type scan parameter to a band configuration
* to be used in join command.
*/
static u8
mwifiex_radio_type_to_band(u8 radio_type)
{
switch (radio_type) {
case HostCmd_SCAN_RADIO_TYPE_A:
return BAND_A;
case HostCmd_SCAN_RADIO_TYPE_BG:
default:
return BAND_G;
}
}
/*
* This is an internal function used to start a scan based on an input
* configuration.
*
* This uses the input user scan configuration information when provided in
* order to send the appropriate scan commands to firmware to populate or
* update the internal driver scan table.
*/
int mwifiex_scan_networks(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg *user_scan_in)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
union mwifiex_scan_cmd_config_tlv *scan_cfg_out;
struct mwifiex_ie_types_chan_list_param_set *chan_list_out;
struct mwifiex_chan_scan_param_set *scan_chan_list;
u8 filtered_scan;
u8 scan_current_chan_only;
u8 max_chan_per_scan;
unsigned long flags;
if (adapter->scan_processing) {
dev_err(adapter->dev, "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
dev_err(adapter->dev,
"cmd: Scan is blocked during association...\n");
return -EBUSY;
}
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = true;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
GFP_KERNEL);
if (!scan_cfg_out) {
ret = -ENOMEM;
goto done;
}
scan_chan_list = kcalloc(MWIFIEX_USER_SCAN_CHAN_MAX,
sizeof(struct mwifiex_chan_scan_param_set),
GFP_KERNEL);
if (!scan_chan_list) {
kfree(scan_cfg_out);
ret = -ENOMEM;
goto done;
}
mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config,
&chan_list_out, scan_chan_list, &max_chan_per_scan,
&filtered_scan, &scan_current_chan_only);
ret = mwifiex_scan_channel_list(priv, max_chan_per_scan, filtered_scan,
&scan_cfg_out->config, chan_list_out,
scan_chan_list);
/* Get scan command from scan_pending_q and put to cmd_pending_q */
if (!ret) {
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (!list_empty(&adapter->scan_pending_q)) {
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
true);
queue_work(adapter->workqueue, &adapter->main_work);
/* Perform internal scan synchronously */
if (!priv->scan_request) {
dev_dbg(adapter->dev, "wait internal scan\n");
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
}
}
kfree(scan_cfg_out);
kfree(scan_chan_list);
done:
if (ret) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
}
return ret;
}
/*
* This function prepares a scan command to be sent to the firmware.
*
* This uses the scan command configuration sent to the command processing
* module in command preparation stage to configure a scan command structure
* to send to firmware.
*
* The fixed fields specifying the BSS type and BSSID filters as well as a
* variable number/length of TLVs are sent in the command to firmware.
*
* Preparation also includes -
* - Setting command ID, and proper size
* - Ensuring correct endian-ness
*/
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
struct mwifiex_scan_cmd_config *scan_cfg)
{
struct host_cmd_ds_802_11_scan *scan_cmd = &cmd->params.scan;
/* Set fixed field variables in scan command */
scan_cmd->bss_mode = scan_cfg->bss_mode;
memcpy(scan_cmd->bssid, scan_cfg->specific_bssid,
sizeof(scan_cmd->bssid));
memcpy(scan_cmd->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN);
/* Size is equal to the sizeof(fixed portions) + the TLV len + header */
cmd->size = cpu_to_le16((u16) (sizeof(scan_cmd->bss_mode)
+ sizeof(scan_cmd->bssid)
+ scan_cfg->tlv_buf_len + S_DS_GEN));
return 0;
}
/*
* This function checks compatibility of requested network with current
* driver settings.
*/
int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
int ret = -1;
if (!bss_desc)
return -1;
if ((mwifiex_get_cfp(priv, (u8) bss_desc->bss_band,
(u16) bss_desc->channel, 0))) {
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
ret = mwifiex_is_network_compatible(priv, bss_desc,
priv->bss_mode);
if (ret)
dev_err(priv->adapter->dev,
"Incompatible network settings\n");
break;
default:
ret = 0;
}
}
return ret;
}
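/*
 * This function updates the current BSS parameters from a cfg80211 BSS entry.
 *
 * A temporary BSS descriptor is filled from the cfg80211 entry and checked
 * for compatibility; if compatible, it is copied into the current BSS
 * parameters and the current beacon buffer is saved.
 */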
static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
struct mwifiex_bssdescriptor *bss_desc;
int ret;
unsigned long flags;
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL);
if (!bss_desc)
return -ENOMEM;
ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
if (ret)
goto done;
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (ret)
goto done;
spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
sizeof(priv->curr_bss_params.bss_descriptor));
/* The contents of beacon_ie will be copied to its own buffer
* in mwifiex_save_curr_bcn()
*/
mwifiex_save_curr_bcn(priv);
spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
done:
/* beacon_ie buffer was allocated in function
* mwifiex_fill_new_bss_desc(). Free it now.
*/
kfree(bss_desc->beacon_buf);
kfree(bss_desc);
return 0;
}
/*
* This function handles the command response of scan.
*
* The response buffer for the scan command has the following
* memory layout:
*
* .-------------------------------------------------------------.
* | Header (4 * sizeof(t_u16)): Standard command response hdr |
* .-------------------------------------------------------------.
* | BufSize (t_u16) : sizeof the BSS Description data |
* .-------------------------------------------------------------.
* | NumOfSet (t_u8) : Number of BSS Descs returned |
* .-------------------------------------------------------------.
* | BSSDescription data (variable, size given in BufSize) |
* .-------------------------------------------------------------.
* | TLV data (variable, size calculated using Header->Size, |
* | BufSize and sizeof the fixed fields above) |
* .-------------------------------------------------------------.
*/
int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
struct mwifiex_ie_types_data *tlv_data;
struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
u8 *bss_info;
u32 scan_resp_size;
u32 bytes_left;
u32 idx;
u32 tlv_buf_size;
struct mwifiex_chan_freq_power *cfp;
struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
struct chan_band_param_set *chan_band;
u8 is_bgscan_resp;
unsigned long flags;
struct cfg80211_bss *bss;
is_bgscan_resp = (le16_to_cpu(resp->command)
== HostCmd_CMD_802_11_BG_SCAN_QUERY);
if (is_bgscan_resp)
scan_rsp = &resp->params.bg_scan_query_resp.scan_resp;
else
scan_rsp = &resp->params.scan_resp;
if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
scan_rsp->number_of_sets);
ret = -1;
goto check_next_scan;
}
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
bytes_left);
scan_resp_size = le16_to_cpu(resp->size);
dev_dbg(adapter->dev,
"info: SCAN_RESP: returned %d APs before parsing\n",
scan_rsp->number_of_sets);
bss_info = scan_rsp->bss_desc_and_tlv_buffer;
/*
* The size of the TLV buffer is equal to the entire command response
* size (scan_resp_size) minus the fixed fields (sizeof()'s), the
* BSS Descriptions (bss_descript_size as bytes_left) and the command
* response header (S_DS_GEN)
*/
tlv_buf_size = scan_resp_size - (bytes_left
+ sizeof(scan_rsp->bss_descript_size)
+ sizeof(scan_rsp->number_of_sets)
+ S_DS_GEN);
tlv_data = (struct mwifiex_ie_types_data *) (scan_rsp->
bss_desc_and_tlv_buffer +
bytes_left);
/* Search the TLV buffer space in the scan response for any valid
TLVs */
mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
TLV_TYPE_TSFTIMESTAMP,
(struct mwifiex_ie_types_data **)
&tsf_tlv);
/* Search the TLV buffer space in the scan response for any valid
TLVs */
mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
TLV_TYPE_CHANNELBANDLIST,
(struct mwifiex_ie_types_data **)
&chan_band_tlv);
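/* Parse each returned BSS entry: extract the fixed beacon parameters,
locate the DS Parameter Set IE for the channel, and inform cfg80211 of
the result. */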
for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
u8 bssid[ETH_ALEN];
s32 rssi;
const u8 *ie_buf;
size_t ie_len;
u16 channel = 0;
__le64 fw_tsf = 0;
u16 beacon_size = 0;
u32 curr_bcn_bytes;
u32 freq;
u16 beacon_period;
u16 cap_info_bitmap;
u8 *current_ptr;
u64 timestamp;
struct mwifiex_bcn_param *bcn_param;
struct mwifiex_bss_priv *bss_priv;
if (bytes_left >= sizeof(beacon_size)) {
/* Extract & convert beacon size from command buffer */
memcpy(&beacon_size, bss_info, sizeof(beacon_size));
bytes_left -= sizeof(beacon_size);
bss_info += sizeof(beacon_size);
}
if (!beacon_size || beacon_size > bytes_left) {
bss_info += bytes_left;
bytes_left = 0;
ret = -1;
goto check_next_scan;
}
/* Initialize the current working beacon pointer for this BSS
* iteration */
current_ptr = bss_info;
/* Advance the return beacon pointer past the current beacon */
bss_info += beacon_size;
bytes_left -= beacon_size;
curr_bcn_bytes = beacon_size;
/*
* First 5 fields are bssid, RSSI, time stamp, beacon interval,
* and capability information
*/
if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
dev_err(adapter->dev,
"InterpretIE: not enough bytes left\n");
continue;
}
bcn_param = (struct mwifiex_bcn_param *)current_ptr;
current_ptr += sizeof(*bcn_param);
curr_bcn_bytes -= sizeof(*bcn_param);
memcpy(bssid, bcn_param->bssid, ETH_ALEN);
rssi = (s32) bcn_param->rssi;
rssi = (-rssi) * 100; /* Convert dBm to mBm */
dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
timestamp = le64_to_cpu(bcn_param->timestamp);
beacon_period = le16_to_cpu(bcn_param->beacon_period);
cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
cap_info_bitmap);
/* The rest of the current buffer contains IEs */
ie_buf = current_ptr;
ie_len = curr_bcn_bytes;
dev_dbg(adapter->dev,
"info: InterpretIE: IELength for this AP = %d\n",
curr_bcn_bytes);
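/* Walk the IEs of this beacon to find the DS Parameter Set and extract
the channel number */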
while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
u8 element_id, element_len;
element_id = *current_ptr;
element_len = *(current_ptr + 1);
if (curr_bcn_bytes < element_len +
sizeof(struct ieee_types_header)) {
dev_err(priv->adapter->dev,
"%s: bytes left < IE length\n",
__func__);
goto check_next_scan;
}
if (element_id == WLAN_EID_DS_PARAMS) {
channel = *(current_ptr + sizeof(struct ieee_types_header));
break;
}
current_ptr += element_len +
sizeof(struct ieee_types_header);
curr_bcn_bytes -= element_len +
sizeof(struct ieee_types_header);
}
/*
* If the TSF TLV was appended to the scan results, save this
* entry's TSF value in the fw_tsf field. It is the firmware's
* TSF value at the time the beacon or probe response was
* received.
*/
if (tsf_tlv)
memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
sizeof(fw_tsf));
if (channel) {
struct ieee80211_channel *chan;
u8 band;
band = BAND_G;
if (chan_band_tlv) {
chan_band =
&chan_band_tlv->chan_band_param[idx];
band = mwifiex_radio_type_to_band(
chan_band->radio_type
& (BIT(0) | BIT(1)));
}
cfp = mwifiex_get_cfp(priv, band, channel, 0);
freq = cfp ? cfp->freq : 0;
chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
bss = cfg80211_inform_bss(priv->wdev->wiphy,
chan, bssid, timestamp,
cap_info_bitmap, beacon_period,
ie_buf, ie_len, rssi, GFP_KERNEL);
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band;
bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
if (priv->media_connected &&
!memcmp(bssid,
priv->curr_bss_params.bss_descriptor
.mac_address, ETH_ALEN))
mwifiex_update_curr_bss_params(priv,
bss);
cfg80211_put_bss(priv->wdev->wiphy, bss);
}
} else {
dev_dbg(adapter->dev, "missing BSS channel IE\n");
}
}
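/* Done parsing (or bailed out on error); complete the scan if no more
commands are pending, otherwise defer, abort or issue the next scan
command. */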
check_next_scan:
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
if (!priv->scan_request) {
dev_dbg(adapter->dev,
"complete internal scan\n");
mwifiex_complete_cmd(adapter,
adapter->curr_cmd);
}
}
if (priv->report_scan_result)
priv->report_scan_result = false;
if (priv->user_scan_cfg) {
if (priv->scan_request) {
dev_dbg(priv->adapter->dev,
"info: notifying scan done\n");
cfg80211_scan_done(priv->scan_request, 0);
priv->scan_request = NULL;
} else {
dev_dbg(priv->adapter->dev,
"info: scan already aborted\n");
}
kfree(priv->user_scan_cfg);
priv->user_scan_cfg = NULL;
}
} else {
if (priv->user_scan_cfg && !priv->scan_request) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
mod_timer(&priv->scan_delay_timer, jiffies);
dev_dbg(priv->adapter->dev,
"info: %s: triggerring scan abort\n", __func__);
} else if (!mwifiex_wmm_lists_empty(adapter) &&
(priv->scan_request && (priv->scan_request->flags &
NL80211_SCAN_FLAG_LOW_PRIORITY))) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
adapter->scan_delay_cnt = 1;
mod_timer(&priv->scan_delay_timer, jiffies +
msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
dev_dbg(priv->adapter->dev,
"info: %s: deferring scan\n", __func__);
} else {
/* Get scan command from scan_pending_q and put to
cmd_pending_q */
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
true);
}
}
return ret;
}
/*
* This function prepares command for background scan query.
*
* Preparation includes -
* - Setting command ID and proper size
* - Setting background scan flush parameter
* - Ensuring correct endian-ness
*/
int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd)
{
struct host_cmd_ds_802_11_bg_scan_query *bg_query =
&cmd->params.bg_scan_query;
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_QUERY);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_bg_scan_query)
+ S_DS_GEN);
bg_query->flush = 1;
return 0;
}
/*
* This function inserts a scan command node into the scan pending queue.
*/
void
mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node)
{
struct mwifiex_adapter *adapter = priv->adapter;
unsigned long flags;
cmd_node->wait_q_enabled = true;
cmd_node->condition = &adapter->scan_wait_q_woken;
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
list_add_tail(&cmd_node->list, &adapter->scan_pending_q);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
}
/*
* This function sends a scan command for all available channels to the
* firmware, filtered on a specific SSID.
*/
static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
struct cfg80211_ssid *req_ssid)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct mwifiex_user_scan_cfg *scan_cfg;
if (adapter->scan_processing) {
dev_err(adapter->dev, "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
dev_err(adapter->dev,
"cmd: Scan is blocked during association...\n");
return -EBUSY;
}
scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
if (!scan_cfg)
return -ENOMEM;
scan_cfg->ssid_list = req_ssid;
scan_cfg->num_ssids = 1;
ret = mwifiex_scan_networks(priv, scan_cfg);
kfree(scan_cfg);
return ret;
}
/*
* Sends IOCTL request to start a scan.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*
* Scan command can be issued for both normal scan and specific SSID
* scan, depending upon whether an SSID is provided or not.
*/
int mwifiex_request_scan(struct mwifiex_private *priv,
struct cfg80211_ssid *req_ssid)
{
int ret;
if (down_interruptible(&priv->async_sem)) {
dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
__func__);
return -1;
}
priv->adapter->scan_wait_q_woken = false;
if (req_ssid && req_ssid->ssid_len != 0)
/* Specific SSID scan */
ret = mwifiex_scan_specific_ssid(priv, req_ssid);
else
/* Normal scan */
ret = mwifiex_scan_networks(priv, NULL);
up(&priv->async_sem);
return ret;
}
/*
* This function appends the vendor specific IE TLV to a buffer.
*/
int
mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
u16 vsie_mask, u8 **buffer)
{
int id, ret_len = 0;
struct mwifiex_ie_types_vendor_param_set *vs_param_set;
if (!buffer)
return 0;
if (!(*buffer))
return 0;
/*
* Traverse through the saved vendor specific IE array and append
* the selected (scan/assoc/adhoc) IE as a TLV to the command
*/
for (id = 0; id < MWIFIEX_MAX_VSIE_NUM; id++) {
if (priv->vs_ie[id].mask & vsie_mask) {
vs_param_set =
(struct mwifiex_ie_types_vendor_param_set *)
*buffer;
vs_param_set->header.type =
cpu_to_le16(TLV_TYPE_PASSTHROUGH);
vs_param_set->header.len =
cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
& 0x00FF) + 2);
memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
le16_to_cpu(vs_param_set->header.len));
*buffer += le16_to_cpu(vs_param_set->header.len) +
sizeof(struct mwifiex_ie_types_header);
ret_len += le16_to_cpu(vs_param_set->header.len) +
sizeof(struct mwifiex_ie_types_header);
}
}
return ret_len;
}
/*
* This function saves a beacon buffer of the current BSS descriptor.
*
* The current beacon buffer is saved so that it can be restored in the
* following cases, which would otherwise leave the buffer without the
* current SSID's beacon:
* - The current SSID was somehow not found in the last scan.
* - The current SSID was the last entry of the scan table and overloaded.
*/
void
mwifiex_save_curr_bcn(struct mwifiex_private *priv)
{
struct mwifiex_bssdescriptor *curr_bss =
&priv->curr_bss_params.bss_descriptor;
if (!curr_bss->beacon_buf_size)
return;
/* allocate the beacon buffer the first time, or if its size has changed */
if (!priv->curr_bcn_buf ||
priv->curr_bcn_size != curr_bss->beacon_buf_size) {
priv->curr_bcn_size = curr_bss->beacon_buf_size;
kfree(priv->curr_bcn_buf);
priv->curr_bcn_buf = kmalloc(curr_bss->beacon_buf_size,
GFP_ATOMIC);
if (!priv->curr_bcn_buf)
return;
}
memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
curr_bss->beacon_buf_size);
dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
priv->curr_bcn_size);
curr_bss->beacon_buf = priv->curr_bcn_buf;
/* adjust the pointers in the current BSS descriptor */
if (curr_bss->bcn_wpa_ie)
curr_bss->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
(curr_bss->beacon_buf +
curr_bss->wpa_offset);
if (curr_bss->bcn_rsn_ie)
curr_bss->bcn_rsn_ie = (struct ieee_types_generic *)
(curr_bss->beacon_buf +
curr_bss->rsn_offset);
if (curr_bss->bcn_ht_cap)
curr_bss->bcn_ht_cap = (struct ieee80211_ht_cap *)
(curr_bss->beacon_buf +
curr_bss->ht_cap_offset);
if (curr_bss->bcn_ht_oper)
curr_bss->bcn_ht_oper = (struct ieee80211_ht_operation *)
(curr_bss->beacon_buf +
curr_bss->ht_info_offset);
if (curr_bss->bcn_vht_cap)
curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
curr_bss->vht_cap_offset);
if (curr_bss->bcn_vht_oper)
curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
curr_bss->vht_info_offset);
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
(curr_bss->beacon_buf + curr_bss->bss_co_2040_offset);
if (curr_bss->bcn_ext_cap)
curr_bss->bcn_ext_cap = curr_bss->beacon_buf +
curr_bss->ext_cap_offset;
if (curr_bss->oper_mode)
curr_bss->oper_mode = (void *)(curr_bss->beacon_buf +
curr_bss->oper_mode_offset);
}
/*
* This function frees the current BSS descriptor beacon buffer.
*/
void
mwifiex_free_curr_bcn(struct mwifiex_private *priv)
{
kfree(priv->curr_bcn_buf);
priv->curr_bcn_buf = NULL;
}
| gpl-2.0 |
CTXz/Degaswifi-CPU-tweaks | drivers/scsi/tmscsim.c | 2379 | 77147 | /************************************************************************
* FILE NAME : TMSCSIM.C *
* BY : C.L. Huang, ching@tekram.com.tw *
* Description: Device Driver for Tekram DC-390(T) PCI SCSI *
* Bus Master Host Adapter *
* (C)Copyright 1995-1996 Tekram Technology Co., Ltd. *
************************************************************************
* (C) Copyright: put under GNU GPL in 10/96 *
* (see Documentation/scsi/tmscsim.txt) *
************************************************************************
* $Id: tmscsim.c,v 2.60.2.30 2000/12/20 01:07:12 garloff Exp $ *
* Enhancements and bugfixes by *
* Kurt Garloff <kurt@garloff.de> <garloff@suse.de> *
************************************************************************
* HISTORY: *
* *
* REV# DATE NAME DESCRIPTION *
* 1.00 96/04/24 CLH First release *
* 1.01 96/06/12 CLH Fixed bug of Media Change for Removable *
* Device, scan all LUN. Support Pre2.0.10 *
* 1.02 96/06/18 CLH Fixed bug of Command timeout ... *
* 1.03 96/09/25 KG Added tmscsim_proc_info() *
* 1.04 96/10/11 CLH Updating for support KV 2.0.x *
* 1.05 96/10/18 KG Fixed bug in DC390_abort(null ptr deref)*
* 1.06 96/10/25 KG Fixed module support *
* 1.07 96/11/09 KG Fixed tmscsim_proc_info() *
* 1.08 96/11/18 KG Fixed null ptr in DC390_Disconnect() *
* 1.09 96/11/30 KG Added register the allocated IO space *
* 1.10 96/12/05 CLH Modified tmscsim_proc_info(), and reset *
* pending interrupt in DC390_detect() *
* 1.11 97/02/05 KG/CLH Fixeds problem with partitions greater *
* than 1GB *
* 1.12 98/02/15 MJ Rewritten PCI probing *
* 1.13 98/04/08 KG Support for non DC390, __initfunc decls,*
* changed max devs from 10 to 16 *
* 1.14a 98/05/05 KG Dynamic DCB allocation, add-single-dev *
* for LUNs if LUN_SCAN (BIOS) not set *
* runtime config using /proc interface *
* 1.14b 98/05/06 KG eliminated cli (); sti (); spinlocks *
* 1.14c 98/05/07 KG 2.0.x compatibility *
* 1.20a 98/05/07 KG changed names of funcs to be consistent *
* DC390_ (entry points), dc390_ (internal)*
* reworked locking *
* 1.20b 98/05/12 KG bugs: version, kfree, _ctmp *
* debug output *
* 1.20c 98/05/12 KG bugs: kfree, parsing, EEpromDefaults *
* 1.20d 98/05/14 KG bugs: list linkage, clear flag after *
* reset on startup, code cleanup *
* 1.20e 98/05/15 KG spinlock comments, name space cleanup *
* pLastDCB now part of ACB structure *
* added stats, timeout for 2.1, TagQ bug *
* RESET and INQUIRY interface commands *
* 1.20f 98/05/18 KG spinlocks fixes, max_lun fix, free DCBs *
* for missing LUNs, pending int *
* 1.20g 98/05/19 KG Clean up: Avoid short *
* 1.20h 98/05/21 KG Remove AdaptSCSIID, max_lun ... *
* 1.20i 98/05/21 KG Aiiie: Bug with TagQMask *
* 1.20j 98/05/24 KG Handle STAT_BUSY, handle pACB->pLinkDCB *
* == 0 in remove_dev and DoingSRB_Done *
* 1.20k 98/05/25 KG DMA_INT (experimental) *
* 1.20l 98/05/27 KG remove DMA_INT; DMA_IDLE cmds added; *
* 1.20m 98/06/10 KG glitch configurable; made some global *
* vars part of ACB; use DC390_readX *
* 1.20n 98/06/11 KG startup params *
* 1.20o 98/06/15 KG added TagMaxNum to boot/module params *
* Device Nr -> Idx, TagMaxNum power of 2 *
* 1.20p 98/06/17 KG Docu updates. Reset depends on settings *
* pci_set_master added; 2.0.xx: pcibios_* *
* used instead of MechNum things ... *
* 1.20q 98/06/23 KG Changed defaults. Added debug code for *
* removable media and fixed it. TagMaxNum *
* fixed for DC390. Locking: ACB, DRV for *
* better IRQ sharing. Spelling: Queueing *
* Parsing and glitch_cfg changes. Display *
* real SyncSpeed value. Made DisConn *
* functional (!) *
* 1.20r 98/06/30 KG Debug macros, allow disabling DsCn, set *
* BIT4 in CtrlR4, EN_PAGE_INT, 2.0 module *
* param -1 fixed. *
* 1.20s 98/08/20 KG Debug info on abort(), try to check PCI,*
* phys_to_bus instead of phys_to_virt, *
* fixed sel. process, fixed locking, *
* added MODULE_XXX infos, changed IRQ *
* request flags, disable DMA_INT *
* 1.20t 98/09/07 KG TagQ report fixed; Write Erase DMA Stat;*
* initfunc -> __init; better abort; *
* Timeout for XFER_DONE & BLAST_COMPLETE; *
* Allow up to 33 commands being processed *
* 2.0a 98/10/14 KG Max Cmnds back to 17. DMA_Stat clearing *
* all flags. Clear within while() loops *
* in DataIn_0/Out_0. Null ptr in dumpinfo *
* for pSRB==0. Better locking during init.*
* bios_param() now respects part. table. *
* 2.0b 98/10/24 KG Docu fixes. Timeout Msg in DMA Blast. *
* Disallow illegal idx in INQUIRY/REMOVE *
* 2.0c 98/11/19 KG Cleaned up detect/init for SMP boxes, *
* Write Erase DMA (1.20t) caused problems *
* 2.0d 98/12/25 KG Christmas release ;-) Message handling *
* completely reworked. Handle target ini- *
* tiated SDTR correctly. *
* 2.0d1 99/01/25 KG Try to handle RESTORE_PTR *
* 2.0d2 99/02/08 KG Check for failure of kmalloc, correct *
* inclusion of scsicam.h, DelayReset *
* 2.0d3 99/05/31 KG DRIVER_OK -> DID_OK, DID_NO_CONNECT, *
* detect Target mode and warn. *
* pcmd->result handling cleaned up. *
* 2.0d4 99/06/01 KG Cleaned selection process. Found bug *
* which prevented more than 16 tags. Now: *
* 24. SDTR cleanup. Cleaner multi-LUN *
* handling. Don't modify ControlRegs/FIFO *
* when connected. *
* 2.0d5 99/06/01 KG Clear DevID, Fix INQUIRY after cfg chg. *
* 2.0d6 99/06/02 KG Added ADD special command to allow cfg. *
* before detection. Reset SYNC_NEGO_DONE *
* after a bus reset. *
* 2.0d7 99/06/03 KG Fixed bugs wrt add,remove commands *
* 2.0d8 99/06/04 KG Removed copying of cmnd into CmdBlock. *
* Fixed Oops in _release(). *
* 2.0d9 99/06/06 KG Also tag queue INQUIRY, T_U_R, ... *
* Allow arb. no. of Tagged Cmnds. Max 32 *
* 2.0d1099/06/20 KG TagMaxNo changes now honoured! Queueing *
* clearified (renamed ..) TagMask handling*
* cleaned. *
* 2.0d1199/06/28 KG cmd->result now identical to 2.0d2 *
* 2.0d1299/07/04 KG Changed order of processing in IRQ *
* 2.0d1399/07/05 KG Don't update DCB fields if removed *
* 2.0d1499/07/05 KG remove_dev: Move kfree() to the end *
* 2.0d1599/07/12 KG use_new_eh_code: 0, ULONG -> UINT where *
* appropriate *
* 2.0d1699/07/13 KG Reenable StartSCSI interrupt, Retry msg *
* 2.0d1799/07/15 KG Remove debug msg. Disable recfg. when *
* there are queued cmnds *
* 2.0d1899/07/18 KG Selection timeout: Don't requeue *
* 2.0d1999/07/18 KG Abort: Only call scsi_done if dequeued *
* 2.0d2099/07/19 KG Rst_Detect: DoingSRB_Done *
* 2.0d2199/08/15 KG dev_id for request/free_irq, cmnd[0] for*
* RETRY, SRBdone does DID_ABORT for the *
* cmd passed by DC390_reset() *
* 2.0d2299/08/25 KG dev_id fixed. can_queue: 42 *
* 2.0d2399/08/25 KG Removed some debugging code. dev_id *
* now is set to pACB. Use u8,u16,u32. *
* 2.0d2499/11/14 KG Unreg. I/O if failed IRQ alloc. Call *
* done () w/ DID_BAD_TARGET in case of *
* missing DCB. We are old EH!! *
* 2.0d2500/01/15 KG 2.3.3x compat from Andreas Schultz *
* set unique_id. Disable RETRY message. *
* 2.0d2600/01/29 KG Go to new EH. *
* 2.0d2700/01/31 KG ... but maintain 2.0 compat. *
* and fix DCB freeing *
* 2.0d2800/02/14 KG Queue statistics fixed, dump special cmd*
* Waiting_Timer for failed StartSCSI *
* New EH: Don't return cmnds to ML on RST *
* Use old EH (don't have new EH fns yet) *
* Reset: Unlock, but refuse to queue *
* 2.3 __setup function *
* 2.0e 00/05/22 KG Return residual for 2.3 *
* 2.0e1 00/05/25 KG Compile fixes for 2.3.99 *
* 2.0e2 00/05/27 KG Jeff Garzik's pci_enable_device() *
* 2.0e3 00/09/29 KG Some 2.4 changes. Don't try Sync Nego *
* before INQUIRY has reported ability. *
* Recognise INQUIRY as scanning command. *
* 2.0e4 00/10/13 KG Allow compilation into 2.4 kernel *
* 2.0e5 00/11/17 KG Store Inq.flags in DCB *
* 2.0e6 00/11/22 KG 2.4 init function (Thx to O.Schumann) *
* 2.4 PCI device table (Thx to A.Richter) *
* 2.0e7 00/11/28 KG Allow overriding of BIOS settings *
* 2.0f 00/12/20 KG Handle failed INQUIRYs during scan *
* 2.1a 03/11/29 GL, KG Initial fixing for 2.6. Convert to *
* use the current PCI-mapping API, update *
* command-queuing. *
* 2.1b 04/04/13 GL Fix for 64-bit platforms *
* 2.1b1 04/01/31 GL (applied 05.04) Remove internal *
* command-queuing. *
* 2.1b2 04/02/01 CH (applied 05.04) Fix error-handling *
* 2.1c 04/05/23 GL Update to use the new pci_driver API, *
* some scsi EH updates, more cleanup. *
* 2.1d 04/05/27 GL Moved setting of scan_devices to *
* slave_alloc/_configure/_destroy, as *
* suggested by CH. *
***********************************************************************/
/* DEBUG options */
//#define DC390_DEBUG0
//#define DC390_DEBUG1
//#define DC390_DCBDEBUG
//#define DC390_PARSEDEBUG
//#define DC390_REMOVABLEDEBUG
//#define DC390_LOCKDEBUG
//#define NOP do{}while(0)
#define C_NOP
/* Debug definitions */
#ifdef DC390_DEBUG0
# define DEBUG0(x) x
#else
# define DEBUG0(x) C_NOP
#endif
#ifdef DC390_DEBUG1
# define DEBUG1(x) x
#else
# define DEBUG1(x) C_NOP
#endif
#ifdef DC390_DCBDEBUG
# define DCBDEBUG(x) x
#else
# define DCBDEBUG(x) C_NOP
#endif
#ifdef DC390_PARSEDEBUG
# define PARSEDEBUG(x) x
#else
# define PARSEDEBUG(x) C_NOP
#endif
#ifdef DC390_REMOVABLEDEBUG
# define REMOVABLEDEBUG(x) x
#else
# define REMOVABLEDEBUG(x) C_NOP
#endif
#define DCBDEBUG1(x) C_NOP
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_tcq.h>
#define DC390_BANNER "Tekram DC390/AM53C974"
#define DC390_VERSION "2.1d 2004-05-27"
#define PCI_DEVICE_ID_AMD53C974 PCI_DEVICE_ID_AMD_SCSI
#include "tmscsim.h"
static void dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB );
static void dc390_Disconnect( struct dc390_acb* pACB );
static void dc390_Reselect( struct dc390_acb* pACB );
static void dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB );
static void dc390_ScsiRstDetect( struct dc390_acb* pACB );
static void dc390_EnableMsgOut_Abort(struct dc390_acb*, struct dc390_srb*);
static void dc390_dumpinfo(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB);
static void dc390_ResetDevParam(struct dc390_acb* pACB);
static u32 dc390_laststatus = 0;
static u8 dc390_adapterCnt = 0;
static int disable_clustering;
module_param(disable_clustering, int, S_IRUGO);
MODULE_PARM_DESC(disable_clustering, "If you experience problems with your devices, try setting to 1");
/* Startup values, to be overridden on the command line */
static int tmscsim[] = {-2, -2, -2, -2, -2, -2};
module_param_array(tmscsim, int, NULL, 0);
MODULE_PARM_DESC(tmscsim, "Host SCSI ID, Speed (0=10MHz), Device Flags, Adapter Flags, Max Tags (log2(tags)-1), DelayReset (s)");
MODULE_AUTHOR("C.L. Huang / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram DC390 and other AMD53C974A based PCI SCSI adapters");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
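/* Jump tables indexed by SCSI bus phase: the dc390_phase0[] routines
finish handling the phase that just ended, the dc390_phase1[] routines
set up the phase reported next (see DC390_Interrupt). */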
static void *dc390_phase0[]={
dc390_DataOut_0,
dc390_DataIn_0,
dc390_Command_0,
dc390_Status_0,
dc390_Nop_0,
dc390_Nop_0,
dc390_MsgOut_0,
dc390_MsgIn_0,
dc390_Nop_1
};
static void *dc390_phase1[]={
dc390_DataOutPhase,
dc390_DataInPhase,
dc390_CommandPhase,
dc390_StatusPhase,
dc390_Nop_0,
dc390_Nop_0,
dc390_MsgOutPhase,
dc390_MsgInPhase,
dc390_Nop_1
};
#ifdef DC390_DEBUG1
static char* dc390_p0_str[] = {
"dc390_DataOut_0",
"dc390_DataIn_0",
"dc390_Command_0",
"dc390_Status_0",
"dc390_Nop_0",
"dc390_Nop_0",
"dc390_MsgOut_0",
"dc390_MsgIn_0",
"dc390_Nop_1"
};
static char* dc390_p1_str[] = {
"dc390_DataOutPhase",
"dc390_DataInPhase",
"dc390_CommandPhase",
"dc390_StatusPhase",
"dc390_Nop_0",
"dc390_Nop_0",
"dc390_MsgOutPhase",
"dc390_MsgInPhase",
"dc390_Nop_1"
};
#endif
static u8 dc390_eepromBuf[MAX_ADAPTER_NUM][EE_LEN];
static u8 dc390_clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20};
static u8 dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20};
/***********************************************************************
* Functions for the management of the internal structures
* (DCBs, SRBs, Queueing)
*
**********************************************************************/
static void inline dc390_start_segment(struct dc390_srb* pSRB)
{
struct scatterlist *psgl = pSRB->pSegmentList;
/* start new sg segment */
pSRB->SGBusAddr = sg_dma_address(psgl);
pSRB->SGToBeXferLen = sg_dma_len(psgl);
}
static unsigned long inline dc390_advance_segment(struct dc390_srb* pSRB, u32 residue)
{
unsigned long xfer = pSRB->SGToBeXferLen - residue;
/* xfer more bytes transferred */
pSRB->SGBusAddr += xfer;
pSRB->TotalXferredLen += xfer;
pSRB->SGToBeXferLen = residue;
return xfer;
}
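/* Find the DCB for the given target ID / LUN in the circular DCB list;
returns NULL if no matching DCB exists. */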
static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun)
{
struct dc390_dcb* pDCB = pACB->pLinkDCB; if (!pDCB) return NULL;
while (pDCB->TargetID != id || pDCB->TargetLUN != lun)
{
pDCB = pDCB->pNextDCB;
if (pDCB == pACB->pLinkDCB)
return NULL;
}
DCBDEBUG1( printk (KERN_DEBUG "DCB %p (%02x,%02x) found.\n", \
pDCB, pDCB->TargetID, pDCB->TargetLUN));
return pDCB;
}
/* Insert SRB on top of the free list */
static __inline__ void dc390_Free_insert (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
DEBUG0(printk ("DC390: Free SRB %p\n", pSRB));
pSRB->pNextSRB = pACB->pFreeSRB;
pACB->pFreeSRB = pSRB;
}
static __inline__ void dc390_Going_append (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
pDCB->GoingSRBCnt++;
DEBUG0(printk("DC390: Append SRB %p to Going\n", pSRB));
/* Append to the list of Going commands */
if( pDCB->pGoingSRB )
pDCB->pGoingLast->pNextSRB = pSRB;
else
pDCB->pGoingSRB = pSRB;
pDCB->pGoingLast = pSRB;
/* No next one in sent list */
pSRB->pNextSRB = NULL;
}
static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
DEBUG0(printk("DC390: Remove SRB %p from Going\n", pSRB));
if (pSRB == pDCB->pGoingSRB)
pDCB->pGoingSRB = pSRB->pNextSRB;
else
{
struct dc390_srb* psrb = pDCB->pGoingSRB;
while (psrb && psrb->pNextSRB != pSRB)
psrb = psrb->pNextSRB;
if (!psrb)
{ printk (KERN_ERR "DC390: Remove non-ex. SRB %p from Going!\n", pSRB); return; }
psrb->pNextSRB = pSRB->pNextSRB;
if (pSRB == pDCB->pGoingLast)
pDCB->pGoingLast = psrb;
}
pDCB->GoingSRBCnt--;
}
static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length)
{
sg_init_one(sg, addr, length);
return sg;
}
/* Create pci mapping */
static int dc390_pci_map (struct dc390_srb* pSRB)
{
int error = 0;
struct scsi_cmnd *pcmd = pSRB->pcmd;
struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp));
/* Map sense buffer */
if (pSRB->SRBFlag & AUTO_REQSENSE) {
pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
DMA_FROM_DEVICE);
cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
/* TODO: error handling */
if (pSRB->SGcount != 1)
error = 1;
DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
/* Map SG list */
} else if (scsi_sg_count(pcmd)) {
int nseg;
nseg = scsi_dma_map(pcmd);
pSRB->pSegmentList = scsi_sglist(pcmd);
pSRB->SGcount = nseg;
/* TODO: error handling */
if (nseg < 0)
error = 1;
DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
__func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
/* Map single segment */
} else
pSRB->SGcount = 0;
return error;
}
/* Remove pci mapping */
static void dc390_pci_unmap (struct dc390_srb* pSRB)
{
struct scsi_cmnd *pcmd = pSRB->pcmd;
struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
DEBUG1(dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp)));
if (pSRB->SRBFlag) {
pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
} else {
scsi_dma_unmap(pcmd);
DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
__func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
}
}
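/* Release the tag allocated to this SRB, if any */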
static void __inline__
dc390_freetag (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
if (pSRB->TagNumber != SCSI_NO_TAG) {
pDCB->TagMask &= ~(1 << pSRB->TagNumber); /* free tag mask */
pSRB->TagNumber = SCSI_NO_TAG;
}
}
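/* Set up the chip for the target, send IDENTIFY (plus tag messages when
tagged queueing is used), optionally queue an SDTR message for sync
negotiation, and issue the selection command. Returns 0 on success, 1 if
selection could not be started. */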
static int
dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB )
{
struct scsi_cmnd *scmd = pSRB->pcmd;
struct scsi_device *sdev = scmd->device;
u8 cmd, disc_allowed, try_sync_nego;
char tag[2];
pSRB->ScsiPhase = SCSI_NOP0;
if (pACB->Connected)
{
// Should not happen normally
printk (KERN_WARNING "DC390: Can't select when connected! (%08x,%02x)\n",
pSRB->SRBState, pSRB->SRBFlag);
pSRB->SRBState = SRB_READY;
pACB->SelConn++;
return 1;
}
if (time_before (jiffies, pACB->pScsiHost->last_reset))
{
DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
return 1;
}
/* KG: Moved pci mapping here */
dc390_pci_map(pSRB);
/* TODO: error handling */
DC390_write8 (Scsi_Dest_ID, pDCB->TargetID);
DC390_write8 (Sync_Period, pDCB->SyncPeriod);
DC390_write8 (Sync_Offset, pDCB->SyncOffset);
DC390_write8 (CtrlReg1, pDCB->CtrlR1);
DC390_write8 (CtrlReg3, pDCB->CtrlR3);
DC390_write8 (CtrlReg4, pDCB->CtrlR4);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); /* Flush FIFO */
DEBUG1(printk (KERN_INFO "DC390: Start SCSI command: %02x (Sync:%02x)\n",\
scmd->cmnd[0], pDCB->SyncMode));
/* Don't disconnect on AUTO_REQSENSE, because it might be a
* Contingent Allegiance Condition (6.6), where no tags should be used.
* All others have to be allowed to disconnect to prevent an Incorrect
* Initiator Connection (6.8.2/6.5.2) */
/* Changed KG, 99/06/06 */
if (! (pSRB->SRBFlag & AUTO_REQSENSE))
disc_allowed = pDCB->DevMode & EN_DISCONNECT_;
else
disc_allowed = 0;
if ((pDCB->SyncMode & SYNC_ENABLE) && pDCB->TargetLUN == 0 && sdev->sdtr &&
(((scmd->cmnd[0] == REQUEST_SENSE || (pSRB->SRBFlag & AUTO_REQSENSE)) &&
!(pDCB->SyncMode & SYNC_NEGO_DONE)) || scmd->cmnd[0] == INQUIRY))
try_sync_nego = 1;
else
try_sync_nego = 0;
pSRB->MsgCnt = 0;
cmd = SEL_W_ATN;
DC390_write8 (ScsiFifo, IDENTIFY(disc_allowed, pDCB->TargetLUN));
/* Change 99/05/31: Don't use tags when not disconnecting (BUSY) */
if ((pDCB->SyncMode & EN_TAG_QUEUEING) && disc_allowed && scsi_populate_tag_msg(scmd, tag)) {
DC390_write8(ScsiFifo, tag[0]);
pDCB->TagMask |= 1 << tag[1];
pSRB->TagNumber = tag[1];
DC390_write8(ScsiFifo, tag[1]);
DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, tag[1]));
cmd = SEL_W_ATN3;
} else {
/* No TagQ */
//no_tag:
DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? "" : "o", pSRB));
}
pSRB->SRBState = SRB_START_;
if (try_sync_nego)
{
u8 Sync_Off = pDCB->SyncOffset;
DEBUG0(printk (KERN_INFO "DC390: NEW Sync Nego code triggered (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN));
pSRB->MsgOutBuf[0] = EXTENDED_MESSAGE;
pSRB->MsgOutBuf[1] = 3;
pSRB->MsgOutBuf[2] = EXTENDED_SDTR;
pSRB->MsgOutBuf[3] = pDCB->NegoPeriod;
if (!(Sync_Off & 0x0f)) Sync_Off = SYNC_NEGO_OFFSET;
pSRB->MsgOutBuf[4] = Sync_Off;
pSRB->MsgCnt = 5;
//pSRB->SRBState = SRB_MSGOUT_;
pSRB->SRBState |= DO_SYNC_NEGO;
cmd = SEL_W_ATN_STOP;
}
/* Command is written in CommandPhase, if SEL_W_ATN_STOP ... */
if (cmd != SEL_W_ATN_STOP)
{
if( pSRB->SRBFlag & AUTO_REQSENSE )
{
DC390_write8 (ScsiFifo, REQUEST_SENSE);
DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
DC390_write8 (ScsiFifo, 0);
DC390_write8 (ScsiFifo, 0);
DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
DC390_write8 (ScsiFifo, 0);
DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n"));
}
else /* write cmnd to bus */
{
u8 *ptr; u8 i;
ptr = (u8 *)scmd->cmnd;
for (i = 0; i < scmd->cmd_len; i++)
DC390_write8 (ScsiFifo, *(ptr++));
}
}
DEBUG0(if (pACB->pActiveDCB) \
printk (KERN_WARNING "DC390: ActiveDCB != 0\n"));
DEBUG0(if (pDCB->pActiveSRB) \
printk (KERN_WARNING "DC390: ActiveSRB != 0\n"));
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
if (DC390_read8 (Scsi_Status) & INTERRUPT)
{
dc390_freetag (pDCB, pSRB);
DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n",
scmd->device->id, scmd->device->lun));
pSRB->SRBState = SRB_READY;
//DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
pACB->SelLost++;
return 1;
}
DC390_write8 (ScsiCmd, cmd);
pACB->pActiveDCB = pDCB;
pDCB->pActiveSRB = pSRB;
pACB->Connected = 1;
pSRB->ScsiPhase = SCSI_NOP1;
return 0;
}
static void __inline__
dc390_InvalidCmd(struct dc390_acb* pACB)
{
if (pACB->pActiveDCB->pActiveSRB->SRBState & (SRB_START_ | SRB_MSGOUT))
DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
}
static irqreturn_t __inline__
DC390_Interrupt(void *dev_id)
{
struct dc390_acb *pACB = dev_id;
struct dc390_dcb *pDCB;
struct dc390_srb *pSRB;
u8 sstatus=0;
u8 phase;
void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *);
u8 istate, istatus;
sstatus = DC390_read8 (Scsi_Status);
if( !(sstatus & INTERRUPT) )
return IRQ_NONE;
DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus));
//DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
//dstatus = DC390_read8 (DMA_Status);
//DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
spin_lock_irq(pACB->pScsiHost->host_lock);
istate = DC390_read8 (Intern_State);
istatus = DC390_read8 (INT_Status); /* This clears Scsi_Status, Intern_State and INT_Status ! */
DEBUG1(printk (KERN_INFO "Istatus(Res,Inv,Dis,Serv,Succ,ReS,SelA,Sel)=%02x,",istatus));
dc390_laststatus &= ~0x00ffffff;
dc390_laststatus |= /* dstatus<<24 | */ sstatus<<16 | istate<<8 | istatus;
if (sstatus & ILLEGAL_OP_ERR)
{
printk ("DC390: Illegal Operation detected (%08x)!\n", dc390_laststatus);
dc390_dumpinfo (pACB, pACB->pActiveDCB, pACB->pActiveDCB ? pACB->pActiveDCB->pActiveSRB : NULL);
}
else if (istatus & INVALID_CMD)
{
printk ("DC390: Invalid Command detected (%08x)!\n", dc390_laststatus);
dc390_InvalidCmd( pACB );
goto unlock;
}
if (istatus & SCSI_RESET)
{
dc390_ScsiRstDetect( pACB );
goto unlock;
}
if (istatus & DISCONNECTED)
{
dc390_Disconnect( pACB );
goto unlock;
}
if (istatus & RESELECTED)
{
dc390_Reselect( pACB );
goto unlock;
}
else if (istatus & (SELECTED | SEL_ATTENTION))
{
printk (KERN_ERR "DC390: Target mode not supported!\n");
goto unlock;
}
if (istatus & (SUCCESSFUL_OP|SERVICE_REQUEST) )
{
pDCB = pACB->pActiveDCB;
if (!pDCB)
{
printk (KERN_ERR "DC390: Suc. op/ Serv. req: pActiveDCB = 0!\n");
goto unlock;
}
pSRB = pDCB->pActiveSRB;
if( pDCB->DCBFlag & ABORT_DEV_ )
dc390_EnableMsgOut_Abort (pACB, pSRB);
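/* Two-stage dispatch: first call the "phase 0" handler for the phase
* the bus was in when this interrupt was raised, then switch to the
* new phase reported in the low three bits of Scsi_Status and call
* its "phase 1" (setup) handler. */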
phase = pSRB->ScsiPhase;
DEBUG1(printk (KERN_INFO "DC390: [%i]%s(0) (%02x)\n", phase, dc390_p0_str[phase], sstatus));
stateV = (void *) dc390_phase0[phase];
( *stateV )( pACB, pSRB, &sstatus );
pSRB->ScsiPhase = sstatus & 7;
phase = (u8) sstatus & 7;
DEBUG1(printk (KERN_INFO "DC390: [%i]%s(1) (%02x)\n", phase, dc390_p1_str[phase], sstatus));
stateV = (void *) dc390_phase1[phase];
( *stateV )( pACB, pSRB, &sstatus );
}
unlock:
spin_unlock_irq(pACB->pScsiHost->host_lock);
return IRQ_HANDLED;
}
static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id)
{
irqreturn_t ret;
DEBUG1(printk (KERN_INFO "DC390: Irq (%i) caught: ", irq));
/* Locking is done in DC390_Interrupt */
ret = DC390_Interrupt(dev_id);
DEBUG1(printk (".. IRQ returned\n"));
return ret;
}
static void
dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
u8 sstatus;
u32 ResidCnt;
u8 dstate = 0;
sstatus = *psstatus;
if( !(pSRB->SRBState & SRB_XFERPAD) )
{
if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR) )
pSRB->SRBStatus |= PARITY_ERROR;
if( sstatus & COUNT_2_ZERO )
{
unsigned long timeout = jiffies + HZ;
/* Function called from the ISR with the host_lock held and interrupts disabled */
if (pSRB->SGToBeXferLen)
while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
spin_unlock_irq(pACB->pScsiHost->host_lock);
udelay(50);
spin_lock_irq(pACB->pScsiHost->host_lock);
}
if (!time_before(jiffies, timeout))
printk (KERN_CRIT "DC390: Deadlock in DataOut_0: DMA aborted unfinished: %06x bytes remain!!\n",
DC390_read32 (DMA_Wk_ByteCntr));
dc390_laststatus &= ~0xff000000;
dc390_laststatus |= dstate << 24;
pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
pSRB->SGIndex++;
if( pSRB->SGIndex < pSRB->SGcount )
{
pSRB->pSegmentList++;
dc390_start_segment(pSRB);
}
else
pSRB->SGToBeXferLen = 0;
}
else
{
ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) +
(((u32) DC390_read8 (CtcReg_High) << 16) |
((u32) DC390_read8 (CtcReg_Mid) << 8) |
(u32) DC390_read8 (CtcReg_Low));
dc390_advance_segment(pSRB, ResidCnt);
}
}
if ((*psstatus & 7) != SCSI_DATA_OUT)
{
DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
}
}
static void
dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
u8 sstatus, residual, bval;
u32 ResidCnt, i;
unsigned long xferCnt;
sstatus = *psstatus;
if( !(pSRB->SRBState & SRB_XFERPAD) )
{
if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR))
pSRB->SRBStatus |= PARITY_ERROR;
if( sstatus & COUNT_2_ZERO )
{
int dstate = 0;
unsigned long timeout = jiffies + HZ;
/* Function called from the ISR with the host_lock held and interrupts disabled */
if (pSRB->SGToBeXferLen)
while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
spin_unlock_irq(pACB->pScsiHost->host_lock);
udelay(50);
spin_lock_irq(pACB->pScsiHost->host_lock);
}
if (!time_before(jiffies, timeout)) {
printk (KERN_CRIT "DC390: Deadlock in DataIn_0: DMA aborted unfinished: %06x bytes remain!!\n",
DC390_read32 (DMA_Wk_ByteCntr));
printk (KERN_CRIT "DC390: DataIn_0: DMA State: %i\n", dstate);
}
dc390_laststatus &= ~0xff000000;
dc390_laststatus |= dstate << 24;
DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \
+ ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \
+ ((unsigned long) DC390_read8 (CtcReg_Low)));
DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen));
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
pSRB->SGIndex++;
if( pSRB->SGIndex < pSRB->SGcount )
{
pSRB->pSegmentList++;
dc390_start_segment(pSRB);
}
else
pSRB->SGToBeXferLen = 0;
}
else /* phase changed */
{
residual = 0;
bval = DC390_read8 (Current_Fifo);
while( bval & 0x1f )
{
DEBUG1(printk (KERN_DEBUG "Check for residuals,"));
if( (bval & 0x1f) == 1 )
{
for(i=0; i < 0x100; i++)
{
bval = DC390_read8 (Current_Fifo);
if( !(bval & 0x1f) )
goto din_1;
else if( i == 0x0ff )
{
residual = 1; /* ;1 residual byte */
goto din_1;
}
}
}
else
bval = DC390_read8 (Current_Fifo);
}
din_1:
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_BLAST_CMD);
for (i = 0xa000; i; i--)
{
bval = DC390_read8 (DMA_Status);
if (bval & BLAST_COMPLETE)
break;
}
/* It seems a DMA Blast abort isn't that bad ... */
if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n");
//DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
dc390_laststatus &= ~0xff000000;
dc390_laststatus |= bval << 24;
DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval));
ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) |
((u32) DC390_read8 (CtcReg_Mid) << 8)) |
(u32) DC390_read8 (CtcReg_Low);
xferCnt = dc390_advance_segment(pSRB, ResidCnt);
if (residual) {
size_t count = 1;
size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList);
unsigned long flags;
u8 *ptr;
bval = DC390_read8 (ScsiFifo); /* get one residual byte */
local_irq_save(flags);
ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count);
if (likely(ptr)) {
*(ptr + offset) = bval;
scsi_kunmap_atomic_sg(ptr);
}
local_irq_restore(flags);
WARN_ON(!ptr);
/* 1 more byte read */
xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1);
}
DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\
pSRB->TotalXferredLen, pSRB->SGToBeXferLen));
}
}
if ((*psstatus & 7) != SCSI_DATA_IN)
{
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
}
}
static void
dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}
static void
dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
pSRB->TargetStatus = DC390_read8 (ScsiFifo);
//udelay (1);
pSRB->EndMessage = DC390_read8 (ScsiFifo); /* get message */
*psstatus = SCSI_NOP0;
pSRB->SRBState = SRB_COMPLETED;
DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);
}
static void
dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) )
*psstatus = SCSI_NOP0;
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
static void __inline__
dc390_reprog (struct dc390_acb* pACB, struct dc390_dcb* pDCB)
{
DC390_write8 (Sync_Period, pDCB->SyncPeriod);
DC390_write8 (Sync_Offset, pDCB->SyncOffset);
DC390_write8 (CtrlReg3, pDCB->CtrlR3);
DC390_write8 (CtrlReg4, pDCB->CtrlR4);
dc390_SetXferRate (pACB, pDCB);
}
#ifdef DC390_DEBUG0
static void
dc390_printMsg (u8 *MsgBuf, u8 len)
{
int i;
printk (" %02x", MsgBuf[0]);
for (i = 1; i < len; i++)
printk (" %02x", MsgBuf[i]);
printk ("\n");
}
#endif
#define DC390_ENABLE_MSGOUT DC390_write8 (ScsiCmd, SET_ATN_CMD)
/* reject_msg */
static void __inline__
dc390_MsgIn_reject (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
pSRB->MsgOutBuf[0] = MESSAGE_REJECT;
pSRB->MsgCnt = 1;
DC390_ENABLE_MSGOUT;
DEBUG0 (printk (KERN_INFO "DC390: Reject message\n"));
}
/* abort command */
static void
dc390_EnableMsgOut_Abort ( struct dc390_acb* pACB, struct dc390_srb* pSRB )
{
pSRB->MsgOutBuf[0] = ABORT;
pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
pSRB->pSRBDCB->DCBFlag &= ~ABORT_DEV_;
}
static struct dc390_srb*
dc390_MsgIn_QTag (struct dc390_acb* pACB, struct dc390_dcb* pDCB, s8 tag)
{
struct dc390_srb* pSRB = pDCB->pGoingSRB;
if (pSRB)
{
struct scsi_cmnd *scmd = scsi_find_tag(pSRB->pcmd->device, tag);
pSRB = (struct dc390_srb *)scmd->host_scribble;
if (pDCB->DCBFlag & ABORT_DEV_)
{
pSRB->SRBState = SRB_ABORT_SENT;
dc390_EnableMsgOut_Abort( pACB, pSRB );
}
if (!(pSRB->SRBState & SRB_DISCONNECT))
goto mingx0;
pDCB->pActiveSRB = pSRB;
pSRB->SRBState = SRB_DATA_XFER;
}
else
{
mingx0:
pSRB = pACB->pTmpSRB;
pSRB->SRBState = SRB_UNEXPECT_RESEL;
pDCB->pActiveSRB = pSRB;
pSRB->MsgOutBuf[0] = ABORT_TAG;
pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
}
return pSRB;
}
/* set async transfer mode */
static void
dc390_MsgIn_set_async (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
struct dc390_dcb* pDCB = pSRB->pSRBDCB;
if (!(pSRB->SRBState & DO_SYNC_NEGO))
printk (KERN_INFO "DC390: Target %i initiates Non-Sync?\n", pDCB->TargetID);
pSRB->SRBState &= ~DO_SYNC_NEGO;
pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE);
pDCB->SyncPeriod = 0;
pDCB->SyncOffset = 0;
//pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
pDCB->CtrlR3 = FAST_CLK; /* fast clock / normal scsi */
pDCB->CtrlR4 &= 0x3f;
pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */
dc390_reprog (pACB, pDCB);
}
/* set sync transfer mode */
static void
dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
u8 bval;
u16 wval, wval1;
struct dc390_dcb* pDCB = pSRB->pSRBDCB;
u8 oldsyncperiod = pDCB->SyncPeriod;
u8 oldsyncoffset = pDCB->SyncOffset;
if (!(pSRB->SRBState & DO_SYNC_NEGO))
{
printk (KERN_INFO "DC390: Target %i initiates Sync: %ins %i ... answer ...\n",
pDCB->TargetID, pSRB->MsgInBuf[3]<<2, pSRB->MsgInBuf[4]);
/* reject */
//dc390_MsgIn_reject (pACB, pSRB);
//return dc390_MsgIn_set_async (pACB, pSRB);
/* Reply with corrected SDTR Message */
if (pSRB->MsgInBuf[4] > 15)
{
printk (KERN_INFO "DC390: Lower Sync Offset to 15\n");
pSRB->MsgInBuf[4] = 15;
}
if (pSRB->MsgInBuf[3] < pDCB->NegoPeriod)
{
printk (KERN_INFO "DC390: Set sync nego period to %ins\n", pDCB->NegoPeriod << 2);
pSRB->MsgInBuf[3] = pDCB->NegoPeriod;
}
memcpy (pSRB->MsgOutBuf, pSRB->MsgInBuf, 5);
pSRB->MsgCnt = 5;
DC390_ENABLE_MSGOUT;
}
pSRB->SRBState &= ~DO_SYNC_NEGO;
pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE;
pDCB->SyncOffset &= 0x0f0;
pDCB->SyncOffset |= pSRB->MsgInBuf[4];
pDCB->NegoPeriod = pSRB->MsgInBuf[3];
wval = (u16) pSRB->MsgInBuf[3];
wval = wval << 2; wval -= 3; wval1 = wval / 25; /* compute speed */
if( (wval1 * 25) != wval) wval1++;
bval = FAST_CLK+FAST_SCSI; /* fast clock / fast scsi */
pDCB->CtrlR4 &= 0x3f; /* Glitch eater: 12ns less than normal */
if (pACB->glitch_cfg != NS_TO_GLITCH(0))
pDCB->CtrlR4 |= NS_TO_GLITCH(((GLITCH_TO_NS(pACB->glitch_cfg)) - 1));
else
pDCB->CtrlR4 |= NS_TO_GLITCH(0);
if (wval1 < 4) pDCB->CtrlR4 |= NS_TO_GLITCH(0); /* Ultra */
if (wval1 >= 8)
{
wval1--; /* Timing computation differs by 1 from FAST_SCSI */
bval = FAST_CLK; /* fast clock / normal scsi */
pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */
}
pDCB->CtrlR3 = bval;
pDCB->SyncPeriod = (u8)wval1;
if ((oldsyncperiod != wval1 || oldsyncoffset != pDCB->SyncOffset) && pDCB->TargetLUN == 0)
{
if (! (bval & FAST_SCSI)) wval1++;
printk (KERN_INFO "DC390: Target %i: Sync transfer %i.%1i MHz, Offset %i\n", pDCB->TargetID,
40/wval1, ((40%wval1)*10+wval1/2)/wval1, pDCB->SyncOffset & 0x0f);
}
dc390_reprog (pACB, pDCB);
}
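/* Worked example for the conversion above (illustrative values only):
* an SDTR period byte of 25 means 25 * 4ns = 100ns. wval = 100 - 3 = 97,
* so wval1 = 97/25 rounded up = 4; with wval1 < 8 the FAST_CLK+FAST_SCSI
* setting is kept and the reported rate is 40/4 = 10.0 MHz, with the
* offset taken straight from MsgInBuf[4]. */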
/* handle RESTORE_PTR */
/* This doesn't look very healthy... to-be-fixed */
static void
dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
struct scsi_cmnd *pcmd = pSRB->pcmd;
struct scatterlist *psgl;
pSRB->TotalXferredLen = 0;
pSRB->SGIndex = 0;
if (scsi_sg_count(pcmd)) {
size_t saved;
pSRB->pSegmentList = scsi_sglist(pcmd);
psgl = pSRB->pSegmentList;
//dc390_pci_sync(pSRB);
while (pSRB->TotalXferredLen + (unsigned long) sg_dma_len(psgl) < pSRB->Saved_Ptr)
{
pSRB->TotalXferredLen += (unsigned long) sg_dma_len(psgl);
pSRB->SGIndex++;
if( pSRB->SGIndex < pSRB->SGcount )
{
pSRB->pSegmentList++;
dc390_start_segment(pSRB);
}
else
pSRB->SGToBeXferLen = 0;
}
saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen;
pSRB->SGToBeXferLen -= saved;
pSRB->SGBusAddr += saved;
printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
} else {
pSRB->SGcount = 0;
printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
}
pSRB->TotalXferredLen = pSRB->Saved_Ptr;
}
/* According to the docs, the AM53C974 reads the message and
* generates a Successful Operation IRQ before asserting ACK for
* the last byte (how does it know whether it's the last ?) */
/* The old code handled it in another way, indicating that on
* every message byte an IRQ is generated and every byte has to
* be manually ACKed. Hmmm? (KG, 98/11/28) */
/* The old implementation was correct. Sigh! */
/* Check if the message is complete */
static u8 __inline__
dc390_MsgIn_complete (u8 *msgbuf, u32 len)
{
if (*msgbuf == EXTENDED_MESSAGE)
{
if (len < 2) return 0;
if (len < msgbuf[1] + 2) return 0;
}
else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) // two byte messages
if (len < 2) return 0;
return 1;
}
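/* Example (illustrative): an SDTR arrives as the five bytes
* 01 03 01 <period> <offset>; since msgbuf[1] == 3 it only counts as
* complete once len == msgbuf[1] + 2 == 5. Two-byte messages
* (0x20-0x2f, e.g. SIMPLE_QUEUE_TAG plus its tag value) complete at
* len == 2; all other single-byte messages complete immediately. */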
/* read and eval received messages */
static void
dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
struct dc390_dcb* pDCB = pACB->pActiveDCB;
/* Read the msg */
pSRB->MsgInBuf[pACB->MsgLen++] = DC390_read8 (ScsiFifo);
//pSRB->SRBState = 0;
/* Msg complete ? */
if (dc390_MsgIn_complete (pSRB->MsgInBuf, pACB->MsgLen))
{
DEBUG0 (printk (KERN_INFO "DC390: MsgIn:"); dc390_printMsg (pSRB->MsgInBuf, pACB->MsgLen));
/* Now eval the msg */
switch (pSRB->MsgInBuf[0])
{
case DISCONNECT:
pSRB->SRBState = SRB_DISCONNECT; break;
case SIMPLE_QUEUE_TAG:
case HEAD_OF_QUEUE_TAG:
case ORDERED_QUEUE_TAG:
pSRB = dc390_MsgIn_QTag (pACB, pDCB, pSRB->MsgInBuf[1]);
break;
case MESSAGE_REJECT:
DC390_write8 (ScsiCmd, RESET_ATN_CMD);
pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
if( pSRB->SRBState & DO_SYNC_NEGO)
dc390_MsgIn_set_async (pACB, pSRB);
break;
case EXTENDED_MESSAGE:
/* reject every extended msg but SDTR */
if (pSRB->MsgInBuf[1] != 3 || pSRB->MsgInBuf[2] != EXTENDED_SDTR)
dc390_MsgIn_reject (pACB, pSRB);
else
{
if (pSRB->MsgInBuf[3] == 0 || pSRB->MsgInBuf[4] == 0)
dc390_MsgIn_set_async (pACB, pSRB);
else
dc390_MsgIn_set_sync (pACB, pSRB);
}
// nothing has to be done
case COMMAND_COMPLETE: break;
// SAVE POINTER may be ignored as we have the struct dc390_srb* associated with the
// scsi command. Thanks, Gerard, for pointing it out.
case SAVE_POINTERS:
pSRB->Saved_Ptr = pSRB->TotalXferredLen;
break;
// The device might want to restart transfer with a RESTORE
case RESTORE_POINTERS:
DEBUG0(printk ("DC390: RESTORE POINTER message received ... try to handle\n"));
dc390_restore_ptr (pACB, pSRB);
break;
// reject unknown messages
default: dc390_MsgIn_reject (pACB, pSRB);
}
/* Clear counter and MsgIn state */
pSRB->SRBState &= ~SRB_MSGIN;
pACB->MsgLen = 0;
}
*psstatus = SCSI_NOP0;
DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
static void
dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
{
unsigned long lval;
struct dc390_dcb* pDCB = pACB->pActiveDCB;
if (pSRB == pACB->pTmpSRB)
{
if (pDCB)
printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (%02i-%i)\n", pDCB->TargetID, pDCB->TargetLUN);
else
printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (DCB 0!)\n");
/* Try to recover - some broken disks react badly to tagged INQUIRY */
if (pDCB && pACB->scan_devices && pDCB->GoingSRBCnt == 1) {
pSRB = pDCB->pGoingSRB;
pDCB->pActiveSRB = pSRB;
} else {
pSRB->pSRBDCB = pDCB;
dc390_EnableMsgOut_Abort(pACB, pSRB);
if (pDCB)
pDCB->DCBFlag |= ABORT_DEV;
return;
}
}
if( pSRB->SGIndex < pSRB->SGcount )
{
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
if( !pSRB->SGToBeXferLen )
{
dc390_start_segment(pSRB);
DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment."));
}
lval = pSRB->SGToBeXferLen;
DEBUG1(printk (KERN_DEBUG " DC390: Start transfer: %li bytes (address %08lx)\n", lval, pSRB->SGBusAddr));
DC390_write8 (CtcReg_Low, (u8) lval);
lval >>= 8;
DC390_write8 (CtcReg_Mid, (u8) lval);
lval >>= 8;
DC390_write8 (CtcReg_High, (u8) lval);
DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen);
DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr);
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
pSRB->SRBState = SRB_DATA_XFER;
DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD);
DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT));
//DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status)));
//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT));
}
else /* xfer pad */
{
if( pSRB->SGcount )
{
pSRB->AdaptStatus = H_OVER_UNDER_RUN;
pSRB->SRBStatus |= OVER_RUN;
DEBUG0(printk (KERN_WARNING " DC390: Overrun -"));
}
DEBUG0(printk (KERN_WARNING " Clear transfer pad \n"));
DC390_write8 (CtcReg_Low, 0);
DC390_write8 (CtcReg_Mid, 0);
DC390_write8 (CtcReg_High, 0);
pSRB->SRBState |= SRB_XFERPAD;
DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE);
/*
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
*/
}
}
static void
dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
dc390_DataIO_Comm (pACB, pSRB, WRITE_DIRECTION);
}
static void
dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
dc390_DataIO_Comm (pACB, pSRB, READ_DIRECTION);
}
static void
dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
struct dc390_dcb* pDCB;
u8 i, cnt;
u8 *ptr;
DC390_write8 (ScsiCmd, RESET_ATN_CMD);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
if( !(pSRB->SRBFlag & AUTO_REQSENSE) )
{
cnt = (u8) pSRB->pcmd->cmd_len;
ptr = (u8 *) pSRB->pcmd->cmnd;
for(i=0; i < cnt; i++)
DC390_write8 (ScsiFifo, *(ptr++));
}
else
{
DC390_write8 (ScsiFifo, REQUEST_SENSE);
pDCB = pACB->pActiveDCB;
DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
DC390_write8 (ScsiFifo, 0);
DC390_write8 (ScsiFifo, 0);
DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
DC390_write8 (ScsiFifo, 0);
DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n"));
}
pSRB->SRBState = SRB_COMMAND;
DC390_write8 (ScsiCmd, INFO_XFER_CMD);
}
static void
dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
pSRB->SRBState = SRB_STATUS;
DC390_write8 (ScsiCmd, INITIATOR_CMD_CMPLTE);
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
static void
dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
u8 bval, i, cnt;
u8 *ptr;
struct dc390_dcb* pDCB;
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
pDCB = pACB->pActiveDCB;
if( !(pSRB->SRBState & SRB_MSGOUT) )
{
cnt = pSRB->MsgCnt;
if( cnt )
{
ptr = (u8 *) pSRB->MsgOutBuf;
for(i=0; i < cnt; i++)
DC390_write8 (ScsiFifo, *(ptr++));
pSRB->MsgCnt = 0;
if( (pDCB->DCBFlag & ABORT_DEV_) &&
(pSRB->MsgOutBuf[0] == ABORT) )
pSRB->SRBState = SRB_ABORT_SENT;
}
else
{
bval = ABORT; /* ??? MSG_NOP */
if( (pSRB->pcmd->cmnd[0] == INQUIRY ) ||
(pSRB->pcmd->cmnd[0] == REQUEST_SENSE) ||
(pSRB->SRBFlag & AUTO_REQSENSE) )
{
if( pDCB->SyncMode & SYNC_ENABLE )
goto mop1;
}
DC390_write8 (ScsiFifo, bval);
}
DC390_write8 (ScsiCmd, INFO_XFER_CMD);
}
else
{
mop1:
printk (KERN_ERR "DC390: OLD Sync Nego code triggered! (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN);
DC390_write8 (ScsiFifo, EXTENDED_MESSAGE);
DC390_write8 (ScsiFifo, 3); /* ;length of extended msg */
DC390_write8 (ScsiFifo, EXTENDED_SDTR); /* ; sync nego */
DC390_write8 (ScsiFifo, pDCB->NegoPeriod);
if (pDCB->SyncOffset & 0x0f)
DC390_write8 (ScsiFifo, pDCB->SyncOffset);
else
DC390_write8 (ScsiFifo, SYNC_NEGO_OFFSET);
pSRB->SRBState |= DO_SYNC_NEGO;
DC390_write8 (ScsiCmd, INFO_XFER_CMD);
}
}
static void
dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
if( !(pSRB->SRBState & SRB_MSGIN) )
{
pSRB->SRBState &= ~SRB_DISCONNECT;
pSRB->SRBState |= SRB_MSGIN;
}
DC390_write8 (ScsiCmd, INFO_XFER_CMD);
//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}
static void
dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}
static void
dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}
static void
dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB )
{
u8 bval, i, cnt;
struct dc390_dcb* ptr;
if( !(pDCB->TargetLUN) )
{
if( !pACB->scan_devices )
{
ptr = pACB->pLinkDCB;
cnt = pACB->DCBCnt;
bval = pDCB->TargetID;
for(i=0; i<cnt; i++)
{
if( ptr->TargetID == bval )
{
ptr->SyncPeriod = pDCB->SyncPeriod;
ptr->SyncOffset = pDCB->SyncOffset;
ptr->CtrlR3 = pDCB->CtrlR3;
ptr->CtrlR4 = pDCB->CtrlR4;
ptr->SyncMode = pDCB->SyncMode;
}
ptr = ptr->pNextDCB;
}
}
}
return;
}
static void
dc390_Disconnect( struct dc390_acb* pACB )
{
struct dc390_dcb *pDCB;
struct dc390_srb *pSRB, *psrb;
u8 i, cnt;
DEBUG0(printk(KERN_INFO "DISC,"));
if (!pACB->Connected) printk(KERN_ERR "DC390: Disconnect not-connected bus?\n");
pACB->Connected = 0;
pDCB = pACB->pActiveDCB;
if (!pDCB)
{
DEBUG0(printk(KERN_ERR "ACB:%p->ActiveDCB:%p IOPort:%04x IRQ:%02x !\n",\
pACB, pDCB, pACB->IOPortBase, pACB->IRQLevel));
mdelay(400);
DC390_read8 (INT_Status); /* Reset Pending INT */
DC390_write8 (ScsiCmd, EN_SEL_RESEL);
return;
}
DC390_write8 (ScsiCmd, EN_SEL_RESEL);
pSRB = pDCB->pActiveSRB;
pACB->pActiveDCB = NULL;
pSRB->ScsiPhase = SCSI_NOP0;
if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
pSRB->SRBState = 0;
else if( pSRB->SRBState & SRB_ABORT_SENT )
{
pDCB->TagMask = 0;
pDCB->DCBFlag = 0;
cnt = pDCB->GoingSRBCnt;
pDCB->GoingSRBCnt = 0;
pSRB = pDCB->pGoingSRB;
for( i=0; i < cnt; i++)
{
psrb = pSRB->pNextSRB;
dc390_Free_insert (pACB, pSRB);
pSRB = psrb;
}
pDCB->pGoingSRB = NULL;
}
else
{
if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
!(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
{ /* Selection time out */
pSRB->AdaptStatus = H_SEL_TIMEOUT;
pSRB->TargetStatus = 0;
goto disc1;
}
else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED))
{
disc1:
dc390_freetag (pDCB, pSRB);
pDCB->pActiveSRB = NULL;
pSRB->SRBState = SRB_FREE;
dc390_SRBdone( pACB, pDCB, pSRB);
}
}
pACB->MsgLen = 0;
}
static void
dc390_Reselect( struct dc390_acb* pACB )
{
struct dc390_dcb* pDCB;
struct dc390_srb* pSRB;
u8 id, lun;
DEBUG0(printk(KERN_INFO "RSEL,"));
pACB->Connected = 1;
pDCB = pACB->pActiveDCB;
if( pDCB )
{ /* Arbitration lost but Reselection won */
DEBUG0(printk ("DC390: (ActiveDCB != 0: Arb. lost but resel. won)!\n"));
pSRB = pDCB->pActiveSRB;
if( !( pACB->scan_devices ) )
{
struct scsi_cmnd *pcmd = pSRB->pcmd;
scsi_set_resid(pcmd, scsi_bufflen(pcmd));
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
dc390_Going_remove(pDCB, pSRB);
dc390_Free_insert(pACB, pSRB);
pcmd->scsi_done (pcmd);
DEBUG0(printk(KERN_DEBUG"DC390: Return SRB %p to free\n", pSRB));
}
}
/* Get ID */
lun = DC390_read8 (ScsiFifo);
DEBUG0(printk ("Dev %02x,", lun));
if (!(lun & (1 << pACB->pScsiHost->this_id)))
printk (KERN_ERR "DC390: Reselection must select host adapter: %02x!\n", lun);
else
lun ^= 1 << pACB->pScsiHost->this_id; /* Mask AdapterID */
id = 0; while (lun >>= 1) id++;
/* Get LUN */
lun = DC390_read8 (ScsiFifo);
if (!(lun & IDENTIFY_BASE)) printk (KERN_ERR "DC390: Resel: Expect identify message!\n");
lun &= 7;
DEBUG0(printk ("(%02i-%i),", id, lun));
pDCB = dc390_findDCB (pACB, id, lun);
if (!pDCB)
{
printk (KERN_ERR "DC390: Reselect from non existing device (%02i-%i)\n",
id, lun);
return;
}
pACB->pActiveDCB = pDCB;
/* TagQ: We expect a message soon, so never mind the exact SRB */
if( pDCB->SyncMode & EN_TAG_QUEUEING )
{
pSRB = pACB->pTmpSRB;
pDCB->pActiveSRB = pSRB;
}
else
{
pSRB = pDCB->pActiveSRB;
if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) )
{
pSRB= pACB->pTmpSRB;
pSRB->SRBState = SRB_UNEXPECT_RESEL;
printk (KERN_ERR "DC390: Reselect without outstanding cmnd (%02i-%i)\n",
id, lun);
pDCB->pActiveSRB = pSRB;
dc390_EnableMsgOut_Abort ( pACB, pSRB );
}
else
{
if( pDCB->DCBFlag & ABORT_DEV_ )
{
pSRB->SRBState = SRB_ABORT_SENT;
printk (KERN_INFO "DC390: Reselect: Abort (%02i-%i)\n",
id, lun);
dc390_EnableMsgOut_Abort( pACB, pSRB );
}
else
pSRB->SRBState = SRB_DATA_XFER;
}
}
DEBUG1(printk (KERN_DEBUG "Resel SRB(%p): TagNum (%02x)\n", pSRB, pSRB->TagNumber));
pSRB->ScsiPhase = SCSI_NOP0;
DC390_write8 (Scsi_Dest_ID, pDCB->TargetID);
DC390_write8 (Sync_Period, pDCB->SyncPeriod);
DC390_write8 (Sync_Offset, pDCB->SyncOffset);
DC390_write8 (CtrlReg1, pDCB->CtrlR1);
DC390_write8 (CtrlReg3, pDCB->CtrlR3);
DC390_write8 (CtrlReg4, pDCB->CtrlR4); /* ; Glitch eater */
DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); /* ;to release the /ACK signal */
}
static int __inline__
dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
struct scsi_cmnd *pcmd;
pcmd = pSRB->pcmd;
REMOVABLEDEBUG(printk(KERN_INFO "DC390: RequestSense(Cmd %02x, Id %02x, LUN %02x)\n",\
pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN));
pSRB->SRBFlag |= AUTO_REQSENSE;
pSRB->SavedTotXLen = pSRB->TotalXferredLen;
pSRB->AdaptStatus = 0;
pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */
/* We are called from SRBdone, original PCI mapping has been removed
* already, new one is set up from StartSCSI */
pSRB->SGIndex = 0;
pSRB->TotalXferredLen = 0;
pSRB->SGToBeXferLen = 0;
return dc390_StartSCSI(pACB, pDCB, pSRB);
}
static void
dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB )
{
u8 status;
struct scsi_cmnd *pcmd;
pcmd = pSRB->pcmd;
/* KG: Moved pci_unmap here */
dc390_pci_unmap(pSRB);
status = pSRB->TargetStatus;
DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB));
if(pSRB->SRBFlag & AUTO_REQSENSE)
{ /* Last command was a Request Sense */
pSRB->SRBFlag &= ~AUTO_REQSENSE;
pSRB->AdaptStatus = 0;
pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION;
//pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status);
if (status == SAM_STAT_CHECK_CONDITION)
pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0);
else /* Retry */
{
if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */)
{
/* Don't retry on TEST_UNIT_READY */
pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION);
REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\
(u32) pcmd->result, (u32) pSRB->TotalXferredLen));
} else {
SET_RES_DRV(pcmd->result, DRIVER_SENSE);
//pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
pSRB->TotalXferredLen = 0;
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
}
}
goto cmd_done;
}
if( status )
{
if (status == SAM_STAT_CHECK_CONDITION)
{
if (dc390_RequestSense(pACB, pDCB, pSRB)) {
SET_RES_DID(pcmd->result, DID_ERROR);
goto cmd_done;
}
return;
}
else if (status == SAM_STAT_TASK_SET_FULL)
{
scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
pSRB->TotalXferredLen = 0;
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
}
else if (status == SAM_STAT_BUSY &&
(pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) &&
pACB->scan_devices)
{
pSRB->AdaptStatus = 0;
pSRB->TargetStatus = status;
pcmd->result = MK_RES(0,0,pSRB->EndMessage,/*status*/0);
}
else
{ /* Another error */
pSRB->TotalXferredLen = 0;
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
goto cmd_done;
}
}
else
{ /* Target status == 0 */
status = pSRB->AdaptStatus;
if (status == H_OVER_UNDER_RUN)
{
pSRB->TargetStatus = 0;
SET_RES_DID(pcmd->result,DID_OK);
SET_RES_MSG(pcmd->result,pSRB->EndMessage);
}
else if (status == H_SEL_TIMEOUT)
{
pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0);
/* Devices are removed below ... */
}
else if( pSRB->SRBStatus & PARITY_ERROR)
{
//pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0);
SET_RES_DID(pcmd->result,DID_PARITY);
SET_RES_MSG(pcmd->result,pSRB->EndMessage);
}
else /* No error */
{
pSRB->AdaptStatus = 0;
pSRB->TargetStatus = 0;
SET_RES_DID(pcmd->result,DID_OK);
}
}
cmd_done:
scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen);
dc390_Going_remove (pDCB, pSRB);
/* Add to free list */
dc390_Free_insert (pACB, pSRB);
DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n"));
pcmd->scsi_done (pcmd);
return;
}
/* Remove all SRBs from Going list and inform midlevel */
static void
dc390_DoingSRB_Done(struct dc390_acb* pACB, struct scsi_cmnd *cmd)
{
struct dc390_dcb *pDCB, *pdcb;
struct dc390_srb *psrb, *psrb2;
int i;
struct scsi_cmnd *pcmd;
pDCB = pACB->pLinkDCB;
pdcb = pDCB;
if (! pdcb) return;
do
{
psrb = pdcb->pGoingSRB;
for (i = 0; i < pdcb->GoingSRBCnt; i++)
{
psrb2 = psrb->pNextSRB;
pcmd = psrb->pcmd;
dc390_Free_insert (pACB, psrb);
psrb = psrb2;
}
pdcb->GoingSRBCnt = 0;
pdcb->pGoingSRB = NULL;
pdcb->TagMask = 0;
pdcb = pdcb->pNextDCB;
} while( pdcb != pDCB );
}
static void
dc390_ResetSCSIBus( struct dc390_acb* pACB )
{
//DC390_write8 (ScsiCmd, RST_DEVICE_CMD);
//udelay (250);
//DC390_write8 (ScsiCmd, NOP_CMD);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
DC390_write8 (ScsiCmd, RST_SCSI_BUS_CMD);
pACB->Connected = 0;
return;
}
static void
dc390_ScsiRstDetect( struct dc390_acb* pACB )
{
printk ("DC390: Rst_Detect: laststat = %08x\n", dc390_laststatus);
//DEBUG0(printk(KERN_INFO "RST_DETECT,"));
DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
/* Unlock before ? */
/* delay 1 ms */
udelay (1000);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
pACB->pScsiHost->last_reset = jiffies + 5*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
pACB->Connected = 0;
if( pACB->ACBFlag & RESET_DEV )
pACB->ACBFlag |= RESET_DONE;
else
{ /* Reset was issued by somebody else */
pACB->ACBFlag |= RESET_DETECT;
dc390_ResetDevParam( pACB );
dc390_DoingSRB_Done( pACB, NULL);
//dc390_RecoverSRB( pACB );
pACB->pActiveDCB = NULL;
pACB->ACBFlag = 0;
}
return;
}
static int DC390_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct scsi_device *sdev = cmd->device;
struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata;
struct dc390_dcb *dcb = sdev->hostdata;
struct dc390_srb *srb;
if (sdev->queue_depth <= dcb->GoingSRBCnt)
goto device_busy;
if (acb->pActiveDCB)
goto host_busy;
if (acb->ACBFlag & (RESET_DETECT|RESET_DONE|RESET_DEV))
goto host_busy;
srb = acb->pFreeSRB;
if (unlikely(srb == NULL))
goto host_busy;
cmd->scsi_done = done;
cmd->result = 0;
acb->Cmds++;
acb->pFreeSRB = srb->pNextSRB;
srb->pNextSRB = NULL;
srb->pSRBDCB = dcb;
srb->pcmd = cmd;
cmd->host_scribble = (char *)srb;
srb->SGIndex = 0;
srb->AdaptStatus = 0;
srb->TargetStatus = 0;
srb->MsgCnt = 0;
srb->SRBStatus = 0;
srb->SRBFlag = 0;
srb->SRBState = 0;
srb->TotalXferredLen = 0;
srb->SGBusAddr = 0;
srb->SGToBeXferLen = 0;
srb->ScsiPhase = 0;
srb->EndMessage = 0;
srb->TagNumber = SCSI_NO_TAG;
if (dc390_StartSCSI(acb, dcb, srb)) {
dc390_Free_insert(acb, srb);
goto host_busy;
}
dc390_Going_append(dcb, srb);
return 0;
host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
device_busy:
return SCSI_MLQUEUE_DEVICE_BUSY;
}
static DEF_SCSI_QCMD(DC390_queuecommand)
static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
struct pci_dev *pdev;
u16 pstat;
if (!pDCB) pDCB = pACB->pActiveDCB;
if (!pSRB && pDCB) pSRB = pDCB->pActiveSRB;
if (pSRB)
{
printk ("DC390: SRB: Xferred %08lx, Remain %08lx, State %08x, Phase %02x\n",
pSRB->TotalXferredLen, pSRB->SGToBeXferLen, pSRB->SRBState,
pSRB->ScsiPhase);
printk ("DC390: AdpaterStatus: %02x, SRB Status %02x\n", pSRB->AdaptStatus, pSRB->SRBStatus);
}
printk ("DC390: Status of last IRQ (DMA/SC/Int/IRQ): %08x\n", dc390_laststatus);
printk ("DC390: Register dump: SCSI block:\n");
printk ("DC390: XferCnt Cmd Stat IntS IRQS FFIS Ctl1 Ctl2 Ctl3 Ctl4\n");
printk ("DC390: %06x %02x %02x %02x",
DC390_read8(CtcReg_Low) + (DC390_read8(CtcReg_Mid) << 8) + (DC390_read8(CtcReg_High) << 16),
DC390_read8(ScsiCmd), DC390_read8(Scsi_Status), DC390_read8(Intern_State));
printk (" %02x %02x %02x %02x %02x %02x\n",
DC390_read8(INT_Status), DC390_read8(Current_Fifo), DC390_read8(CtrlReg1),
DC390_read8(CtrlReg2), DC390_read8(CtrlReg3), DC390_read8(CtrlReg4));
DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
if (DC390_read8(Current_Fifo) & 0x1f)
{
printk ("DC390: FIFO:");
while (DC390_read8(Current_Fifo) & 0x1f) printk (" %02x", DC390_read8(ScsiFifo));
printk ("\n");
}
printk ("DC390: Register dump: DMA engine:\n");
printk ("DC390: Cmd STrCnt SBusA WrkBC WrkAC Stat SBusCtrl\n");
printk ("DC390: %02x %08x %08x %08x %08x %02x %08x\n",
DC390_read8(DMA_Cmd), DC390_read32(DMA_XferCnt), DC390_read32(DMA_XferAddr),
DC390_read32(DMA_Wk_ByteCntr), DC390_read32(DMA_Wk_AddrCntr),
DC390_read8(DMA_Status), DC390_read32(DMA_ScsiBusCtrl));
DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
pdev = pACB->pdev;
pci_read_config_word(pdev, PCI_STATUS, &pstat);
printk ("DC390: Register dump: PCI Status: %04x\n", pstat);
printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n");
}
static int DC390_abort(struct scsi_cmnd *cmd)
{
struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata;
struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata;
scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n");
/* abort() is too stupid for already sent commands at the moment.
* If it's called we are in trouble anyway, so let's dump some info
* into the syslog at least. (KG, 98/08/20,99/06/20) */
dc390_dumpinfo(pACB, pDCB, NULL);
pDCB->DCBFlag |= ABORT_DEV_;
printk(KERN_INFO "DC390: Aborted.\n");
return FAILED;
}
static void dc390_ResetDevParam( struct dc390_acb* pACB )
{
struct dc390_dcb *pDCB, *pdcb;
pDCB = pACB->pLinkDCB;
if (! pDCB) return;
pdcb = pDCB;
do
{
pDCB->SyncMode &= ~SYNC_NEGO_DONE;
pDCB->SyncPeriod = 0;
pDCB->SyncOffset = 0;
pDCB->TagMask = 0;
pDCB->CtrlR3 = FAST_CLK;
pDCB->CtrlR4 &= NEGATE_REQACKDATA | CTRL4_RESERVED | NEGATE_REQACK;
pDCB->CtrlR4 |= pACB->glitch_cfg;
pDCB = pDCB->pNextDCB;
}
while( pdcb != pDCB );
pACB->ACBFlag &= ~(RESET_DEV | RESET_DONE | RESET_DETECT);
}
static int DC390_bus_reset (struct scsi_cmnd *cmd)
{
struct dc390_acb* pACB = (struct dc390_acb*) cmd->device->host->hostdata;
u8 bval;
spin_lock_irq(cmd->device->host->host_lock);
bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST;
DC390_write8(CtrlReg1, bval); /* disable IRQ on bus reset */
pACB->ACBFlag |= RESET_DEV;
dc390_ResetSCSIBus(pACB);
dc390_ResetDevParam(pACB);
mdelay(1);
pACB->pScsiHost->last_reset = jiffies + 3*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
DC390_read8(INT_Status); /* Reset Pending INT */
dc390_DoingSRB_Done(pACB, cmd);
pACB->pActiveDCB = NULL;
pACB->ACBFlag = 0;
bval = DC390_read8(CtrlReg1) & ~DIS_INT_ON_SCSI_RST;
DC390_write8(CtrlReg1, bval); /* re-enable interrupt */
spin_unlock_irq(cmd->device->host->host_lock);
return SUCCESS;
}
/**
* dc390_slave_alloc - Called by the scsi mid layer to tell us about a new
* scsi device that we need to deal with.
*
* @scsi_device: The new scsi device that we need to handle.
*/
static int dc390_slave_alloc(struct scsi_device *scsi_device)
{
struct dc390_acb *pACB = (struct dc390_acb*) scsi_device->host->hostdata;
struct dc390_dcb *pDCB, *pDCB2 = NULL;
uint id = scsi_device->id;
uint lun = scsi_device->lun;
pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL);
if (!pDCB)
return -ENOMEM;
if (!pACB->DCBCnt++) {
pACB->pLinkDCB = pDCB;
pACB->pDCBRunRobin = pDCB;
} else {
pACB->pLastDCB->pNextDCB = pDCB;
}
pDCB->pNextDCB = pACB->pLinkDCB;
pACB->pLastDCB = pDCB;
pDCB->pDCBACB = pACB;
pDCB->TargetID = id;
pDCB->TargetLUN = lun;
/*
* Some values are for all LUNs: Copy them
* In a clean way: We would have an own structure for a SCSI-ID
*/
if (lun && (pDCB2 = dc390_findDCB(pACB, id, 0))) {
pDCB->DevMode = pDCB2->DevMode;
pDCB->SyncMode = pDCB2->SyncMode & SYNC_NEGO_DONE;
pDCB->SyncPeriod = pDCB2->SyncPeriod;
pDCB->SyncOffset = pDCB2->SyncOffset;
pDCB->NegoPeriod = pDCB2->NegoPeriod;
pDCB->CtrlR3 = pDCB2->CtrlR3;
pDCB->CtrlR4 = pDCB2->CtrlR4;
} else {
u8 index = pACB->AdapterIndex;
PEEprom prom = (PEEprom) &dc390_eepromBuf[index][id << 2];
pDCB->DevMode = prom->EE_MODE1;
pDCB->NegoPeriod =
(dc390_clock_period1[prom->EE_SPEED] * 25) >> 2;
pDCB->CtrlR3 = FAST_CLK;
pDCB->CtrlR4 = pACB->glitch_cfg | CTRL4_RESERVED;
if (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION)
pDCB->CtrlR4 |= NEGATE_REQACKDATA | NEGATE_REQACK;
}
if (pDCB->DevMode & SYNC_NEGO_)
pDCB->SyncMode |= SYNC_ENABLE;
else {
pDCB->SyncMode = 0;
pDCB->SyncOffset &= ~0x0f;
}
pDCB->CtrlR1 = pACB->pScsiHost->this_id;
if (pDCB->DevMode & PARITY_CHK_)
pDCB->CtrlR1 |= PARITY_ERR_REPO;
pACB->scan_devices = 1;
scsi_device->hostdata = pDCB;
return 0;
}
/**
* dc390_slave_destroy - Called by the scsi mid layer to tell us about a
* device that is going away.
*
* @scsi_device: The scsi device that we need to remove.
*/
static void dc390_slave_destroy(struct scsi_device *scsi_device)
{
struct dc390_acb* pACB = (struct dc390_acb*) scsi_device->host->hostdata;
struct dc390_dcb* pDCB = (struct dc390_dcb*) scsi_device->hostdata;
struct dc390_dcb* pPrevDCB = pACB->pLinkDCB;
pACB->scan_devices = 0;
BUG_ON(pDCB->GoingSRBCnt > 1);
if (pDCB == pACB->pLinkDCB) {
if (pACB->pLastDCB == pDCB) {
pDCB->pNextDCB = NULL;
pACB->pLastDCB = NULL;
}
pACB->pLinkDCB = pDCB->pNextDCB;
} else {
while (pPrevDCB->pNextDCB != pDCB)
pPrevDCB = pPrevDCB->pNextDCB;
pPrevDCB->pNextDCB = pDCB->pNextDCB;
if (pDCB == pACB->pLastDCB)
pACB->pLastDCB = pPrevDCB;
}
if (pDCB == pACB->pActiveDCB)
pACB->pActiveDCB = NULL;
if (pDCB == pACB->pLinkDCB)
pACB->pLinkDCB = pDCB->pNextDCB;
if (pDCB == pACB->pDCBRunRobin)
pACB->pDCBRunRobin = pDCB->pNextDCB;
kfree(pDCB);
pACB->DCBCnt--;
}
static int dc390_slave_configure(struct scsi_device *sdev)
{
struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata;
struct dc390_dcb *dcb = (struct dc390_dcb *)sdev->hostdata;
acb->scan_devices = 0;
if (sdev->tagged_supported && (dcb->DevMode & TAG_QUEUEING_)) {
dcb->SyncMode |= EN_TAG_QUEUEING;
scsi_activate_tcq(sdev, acb->TagMaxNum);
}
return 0;
}
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.proc_name = "tmscsim",
.name = DC390_BANNER " V" DC390_VERSION,
.slave_alloc = dc390_slave_alloc,
.slave_configure = dc390_slave_configure,
.slave_destroy = dc390_slave_destroy,
.queuecommand = DC390_queuecommand,
.eh_abort_handler = DC390_abort,
.eh_bus_reset_handler = DC390_bus_reset,
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0x4000, /* 8MiB = 16 * 1024 * 512 */
};
/***********************************************************************
* Functions for access to DC390 EEPROM
* and some to emulate it
*
**********************************************************************/
static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
{
u8 carryFlag = 1, j = 0x80, bval;
int i;
for (i = 0; i < 9; i++) {
if (carryFlag) {
pci_write_config_byte(pdev, 0x80, 0x40);
bval = 0xc0;
} else
bval = 0x80;
udelay(160);
pci_write_config_byte(pdev, 0x80, bval);
udelay(160);
pci_write_config_byte(pdev, 0x80, 0);
udelay(160);
carryFlag = (cmd & j) ? 1 : 0;
j >>= 1;
}
}
static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
{
int i;
u16 wval = 0;
u8 bval;
for (i = 0; i < 16; i++) {
wval <<= 1;
pci_write_config_byte(pdev, 0x80, 0x80);
udelay(160);
pci_write_config_byte(pdev, 0x80, 0x40);
udelay(160);
pci_read_config_byte(pdev, 0x00, &bval);
if (bval == 0x22)
wval |= 1;
}
return wval;
}
static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
{
u8 cmd = EEPROM_READ, i;
for (i = 0; i < 0x40; i++) {
pci_write_config_byte(pdev, 0xc0, 0);
udelay(160);
dc390_eeprom_prepare_read(pdev, cmd++);
*ptr++ = dc390_eeprom_get_data(pdev);
pci_write_config_byte(pdev, 0x80, 0);
pci_write_config_byte(pdev, 0x80, 0);
udelay(160);
}
}
/* Override EEprom values with explicitly set values */
static void dc390_eeprom_override(u8 index)
{
u8 *ptr = (u8 *) dc390_eepromBuf[index], id;
/* Adapter Settings */
if (tmscsim[0] != -2)
ptr[EE_ADAPT_SCSI_ID] = (u8)tmscsim[0]; /* Adapter ID */
if (tmscsim[3] != -2)
ptr[EE_MODE2] = (u8)tmscsim[3];
if (tmscsim[5] != -2)
ptr[EE_DELAY] = tmscsim[5]; /* Reset delay */
if (tmscsim[4] != -2)
ptr[EE_TAG_CMD_NUM] = (u8)tmscsim[4]; /* Tagged Cmds */
/* Device Settings */
for (id = 0; id < MAX_SCSI_ID; id++) {
if (tmscsim[2] != -2)
ptr[id << 2] = (u8)tmscsim[2]; /* EE_MODE1 */
if (tmscsim[1] != -2)
ptr[(id << 2) + 1] = (u8)tmscsim[1]; /* EE_Speed */
}
}
static int tmscsim_def[] = {
7,
0 /* 10MHz */,
PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_,
MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION | LUN_CHECK,
3 /* 16 Tags per LUN */,
1 /* s delay after Reset */,
};
/* Copy defaults over set values where missing */
static void dc390_fill_with_defaults (void)
{
int i;
for (i = 0; i < 6; i++) {
if (tmscsim[i] < 0 || tmscsim[i] > 255)
tmscsim[i] = tmscsim_def[i];
}
/* Sanity checks */
if (tmscsim[0] > 7)
tmscsim[0] = 7;
if (tmscsim[1] > 7)
tmscsim[1] = 4;
if (tmscsim[4] > 5)
tmscsim[4] = 4;
if (tmscsim[5] > 180)
tmscsim[5] = 180;
}
static void dc390_check_eeprom(struct pci_dev *pdev, u8 index)
{
u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120};
u8 EEbuf[128];
u16 *ptr = (u16 *)EEbuf, wval = 0;
int i;
dc390_read_eeprom(pdev, ptr);
memcpy(dc390_eepromBuf[index], EEbuf, EE_ADAPT_SCSI_ID);
memcpy(&dc390_eepromBuf[index][EE_ADAPT_SCSI_ID],
&EEbuf[REAL_EE_ADAPT_SCSI_ID], EE_LEN - EE_ADAPT_SCSI_ID);
dc390_eepromBuf[index][EE_DELAY] = interpd[dc390_eepromBuf[index][EE_DELAY]];
for (i = 0; i < 0x40; i++, ptr++)
wval += *ptr;
/* the 16-bit sum of all 64 EEPROM words must be 0x1234, else no Tekram EEprom found */
if (wval != 0x1234) {
int speed;
printk(KERN_INFO "DC390_init: No EEPROM found! Trying default settings ...\n");
/*
* XXX(hch): bogus, because we might have tekram and
* non-tekram hbas in a single machine.
*/
dc390_fill_with_defaults();
speed = dc390_clock_speed[tmscsim[1]];
printk(KERN_INFO "DC390: Used defaults: AdaptID=%i, SpeedIdx=%i (%i.%i MHz), "
"DevMode=0x%02x, AdaptMode=0x%02x, TaggedCmnds=%i (%i), DelayReset=%is\n",
tmscsim[0], tmscsim[1], speed / 10, speed % 10,
(u8)tmscsim[2], (u8)tmscsim[3], tmscsim[4], 2 << (tmscsim[4]), tmscsim[5]);
}
}
static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
{
struct Scsi_Host *shost = pACB->pScsiHost;
u8 dstate;
/* Disable SCSI bus reset interrupt */
DC390_write8(CtrlReg1, DIS_INT_ON_SCSI_RST | shost->this_id);
if (pACB->Gmode2 & RST_SCSI_BUS) {
dc390_ResetSCSIBus(pACB);
udelay(1000);
shost->last_reset = jiffies + HZ/2 +
HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
}
pACB->ACBFlag = 0;
/* Reset Pending INT */
DC390_read8(INT_Status);
/* 250ms selection timeout */
DC390_write8(Scsi_TimeOut, SEL_TIMEOUT);
/* Conversion factor = 0 , 40MHz clock */
DC390_write8(Clk_Factor, CLK_FREQ_40MHZ);
/* NOP cmd - clear command register */
DC390_write8(ScsiCmd, NOP_CMD);
/* Enable Feature and SCSI-2 */
DC390_write8(CtrlReg2, EN_FEATURE+EN_SCSI2_CMD);
/* Fast clock */
DC390_write8(CtrlReg3, FAST_CLK);
/* Negation */
DC390_write8(CtrlReg4, pACB->glitch_cfg | /* glitch eater */
((dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) ?
NEGATE_REQACKDATA : 0));
/* Clear Transfer Count High: ID */
DC390_write8(CtcReg_High, 0);
DC390_write8(DMA_Cmd, DMA_IDLE_CMD);
DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
DC390_write32(DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);
dstate = DC390_read8(DMA_Status);
DC390_write8(DMA_Status, dstate);
}
static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct dc390_acb *pACB;
struct Scsi_Host *shost;
unsigned long io_port;
int error = -ENODEV, i;
if (pci_enable_device(pdev))
goto out;
pci_set_master(pdev);
error = -ENOMEM;
if (disable_clustering)
driver_template.use_clustering = DISABLE_CLUSTERING;
shost = scsi_host_alloc(&driver_template, sizeof(struct dc390_acb));
if (!shost)
goto out_disable_device;
pACB = (struct dc390_acb *)shost->hostdata;
memset(pACB, 0, sizeof(struct dc390_acb));
dc390_check_eeprom(pdev, dc390_adapterCnt);
dc390_eeprom_override(dc390_adapterCnt);
io_port = pci_resource_start(pdev, 0);
shost->this_id = dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID];
shost->io_port = io_port;
shost->n_io_port = 0x80;
shost->irq = pdev->irq;
shost->base = io_port;
shost->unique_id = io_port;
shost->last_reset = jiffies;
pACB->pScsiHost = shost;
pACB->IOPortBase = (u16) io_port;
pACB->IRQLevel = pdev->irq;
shost->max_id = 8;
if (shost->max_id - 1 ==
dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID])
shost->max_id--;
if (dc390_eepromBuf[dc390_adapterCnt][EE_MODE2] & LUN_CHECK)
shost->max_lun = 8;
else
shost->max_lun = 1;
pACB->pFreeSRB = pACB->SRB_array;
pACB->SRBCount = MAX_SRB_CNT;
pACB->AdapterIndex = dc390_adapterCnt;
pACB->TagMaxNum =
2 << dc390_eepromBuf[dc390_adapterCnt][EE_TAG_CMD_NUM];
pACB->Gmode2 = dc390_eepromBuf[dc390_adapterCnt][EE_MODE2];
for (i = 0; i < pACB->SRBCount-1; i++)
pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1];
pACB->SRB_array[pACB->SRBCount-1].pNextSRB = NULL;
pACB->pTmpSRB = &pACB->TmpSRB;
pACB->sel_timeout = SEL_TIMEOUT;
pACB->glitch_cfg = EATER_25NS;
pACB->pdev = pdev;
if (!request_region(io_port, shost->n_io_port, "tmscsim")) {
printk(KERN_ERR "DC390: register IO ports error!\n");
goto out_host_put;
}
/* Reset Pending INT */
DC390_read8_(INT_Status, io_port);
if (request_irq(pdev->irq, do_DC390_Interrupt, IRQF_SHARED,
"tmscsim", pACB)) {
printk(KERN_ERR "DC390: register IRQ error!\n");
goto out_release_region;
}
dc390_init_hw(pACB, dc390_adapterCnt);
dc390_adapterCnt++;
pci_set_drvdata(pdev, shost);
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_free_irq;
scsi_scan_host(shost);
return 0;
out_free_irq:
free_irq(pdev->irq, pACB);
out_release_region:
release_region(io_port, shost->n_io_port);
out_host_put:
scsi_host_put(shost);
out_disable_device:
pci_disable_device(pdev);
out:
return error;
}
/**
* dc390_remove_one - Called to remove a single instance of the adapter.
*
* @dev: The PCI device to remove.
*/
static void dc390_remove_one(struct pci_dev *dev)
{
struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
unsigned long iflags;
struct dc390_acb* pACB = (struct dc390_acb*) scsi_host->hostdata;
u8 bval;
scsi_remove_host(scsi_host);
spin_lock_irqsave(scsi_host->host_lock, iflags);
pACB->ACBFlag = RESET_DEV;
bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST;
DC390_write8 (CtrlReg1, bval); /* disable interrupt */
if (pACB->Gmode2 & RST_SCSI_BUS)
dc390_ResetSCSIBus(pACB);
spin_unlock_irqrestore(scsi_host->host_lock, iflags);
free_irq(scsi_host->irq, pACB);
release_region(scsi_host->io_port, scsi_host->n_io_port);
pci_disable_device(dev);
scsi_host_put(scsi_host);
pci_set_drvdata(dev, NULL);
}
static struct pci_device_id tmscsim_pci_tbl[] = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD53C974,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);
static struct pci_driver dc390_driver = {
.name = "tmscsim",
.id_table = tmscsim_pci_tbl,
.probe = dc390_probe_one,
.remove = dc390_remove_one,
};
static int __init dc390_module_init(void)
{
if (!disable_clustering) {
printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
}
if (tmscsim[0] == -1 || tmscsim[0] > 15) {
tmscsim[0] = 7;
tmscsim[1] = 4;
tmscsim[2] = PARITY_CHK_ | TAG_QUEUEING_;
tmscsim[3] = MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION;
tmscsim[4] = 2;
tmscsim[5] = 10;
printk (KERN_INFO "DC390: Using safe settings.\n");
}
return pci_register_driver(&dc390_driver);
}
static void __exit dc390_module_exit(void)
{
pci_unregister_driver(&dc390_driver);
}
module_init(dc390_module_init);
module_exit(dc390_module_exit);
#ifndef MODULE
static int __init dc390_setup (char *str)
{
int ints[8], i, im;
get_options(str, ARRAY_SIZE(ints), ints);
im = ints[0];
if (im > 6) {
printk (KERN_NOTICE "DC390: ignore extra params!\n");
im = 6;
}
for (i = 0; i < im; i++)
tmscsim[i] = ints[i+1];
/* dc390_checkparams (); */
return 1;
}
__setup("tmscsim=", dc390_setup);
#endif
| gpl-2.0 |
mdeejay/shooteru-ics-sense | drivers/pci/hotplug/shpchp_sysfs.c | 3659 | 3138 | /*
* Compaq Hot Plug Controller Driver
*
* Copyright (c) 1995,2001 Compaq Computer Corporation
* Copyright (c) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (c) 2001 IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "shpchp.h"
/* A few routines that create sysfs entries for the hot plug controller */
static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
char * out = buf;
int index, busnr;
struct resource *res;
struct pci_bus *bus;
pdev = container_of (dev, struct pci_dev, dev);
bus = pdev->subordinate;
out += sprintf(buf, "Free resources: memory\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
!(res->flags & IORESOURCE_PREFETCH)) {
out += sprintf(out, "start = %8.8llx, "
"length = %8.8llx\n",
(unsigned long long)res->start,
(unsigned long long)(res->end - res->start));
}
}
out += sprintf(out, "Free resources: prefetchable memory\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
(res->flags & IORESOURCE_PREFETCH)) {
out += sprintf(out, "start = %8.8llx, "
"length = %8.8llx\n",
(unsigned long long)res->start,
(unsigned long long)(res->end - res->start));
}
}
out += sprintf(out, "Free resources: IO\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_IO)) {
out += sprintf(out, "start = %8.8llx, "
"length = %8.8llx\n",
(unsigned long long)res->start,
(unsigned long long)(res->end - res->start));
}
}
out += sprintf(out, "Free resources: bus numbers\n");
for (busnr = bus->secondary; busnr <= bus->subordinate; busnr++) {
if (!pci_find_bus(pci_domain_nr(bus), busnr))
break;
}
if (busnr < bus->subordinate)
out += sprintf(out, "start = %8.8x, length = %8.8x\n",
busnr, (bus->subordinate - busnr));
return out - buf;
}
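/* Illustrative only -- reading the "ctrl" attribute defined below yields
* text of this shape (the numbers are made up; the layout follows the
* sprintf calls above):
*
*	Free resources: memory
*	start = 00000000f4000000, length = 0000000000ffffff
*	Free resources: prefetchable memory
*	Free resources: IO
*	start = 0000000000002000, length = 0000000000000fff
*	Free resources: bus numbers
*	start = 00000005, length = 00000002
*/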
static DEVICE_ATTR (ctrl, S_IRUGO, show_ctrl, NULL);
int __must_check shpchp_create_ctrl_files (struct controller *ctrl)
{
return device_create_file (&ctrl->pci_dev->dev, &dev_attr_ctrl);
}
void shpchp_remove_ctrl_files(struct controller *ctrl)
{
device_remove_file(&ctrl->pci_dev->dev, &dev_attr_ctrl);
}
| gpl-2.0 |
androidbftab1/bf-kernel-3.4 | arch/powerpc/platforms/cell/spufs/syscalls.c | 4427 | 2137 | #include <linux/file.h>
#include <linux/fs.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "spufs.h"
/**
* sys_spu_run - run code loaded into an SPU
*
* @unpc: next program counter for the SPU
* @ustatus: status of the SPU
*
* This system call transfers the control of execution of a
* user space thread to an SPU. It will return when the
* SPU has finished executing or when it hits an error
* condition and it will be interrupted if a signal needs
* to be delivered to a handler in user space.
*
* The next program counter is set to the passed value
* before the SPU starts fetching code and the user space
* pointer gets updated with the new value when returning
* from kernel space.
*
* The status value returned from spu_run reflects the
* value of the spu_status register after the SPU has stopped.
*
*/
static long do_spu_run(struct file *filp,
__u32 __user *unpc,
__u32 __user *ustatus)
{
long ret;
struct spufs_inode_info *i;
u32 npc, status;
ret = -EFAULT;
if (get_user(npc, unpc))
goto out;
/* check if this file was created by spu_create */
ret = -EINVAL;
if (filp->f_op != &spufs_context_fops)
goto out;
i = SPUFS_I(filp->f_path.dentry->d_inode);
ret = spufs_run_spu(i->i_ctx, &npc, &status);
if (put_user(npc, unpc))
ret = -EFAULT;
if (ustatus && put_user(status, ustatus))
ret = -EFAULT;
out:
return ret;
}
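/*
 * Minimal user-space sketch (illustrative only, not part of this file):
 * how the spu_run call documented above is typically driven. It assumes a
 * powerpc system that defines __NR_spu_run and a context fd obtained
 * earlier from spu_create(); there is no glibc wrapper, so syscall(2) is
 * used directly. Guarded out so it cannot interfere with the kernel build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static int run_spu_context(int ctx_fd)
{
	uint32_t npc = 0;	/* entry point inside SPU local store */
	uint32_t status = 0;	/* receives spu_status when the SPU stops */
	long rc;

	/* Blocks until the SPU stops, hits an error, or a signal arrives. */
	rc = syscall(__NR_spu_run, ctx_fd, &npc, &status);
	if (rc < 0) {
		perror("spu_run");
		return -1;
	}
	printf("SPU stopped: next pc=0x%x, status=0x%x\n", npc, status);
	return 0;
}
#endif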
static long do_spu_create(const char __user *pathname, unsigned int flags,
umode_t mode, struct file *neighbor)
{
struct path path;
struct dentry *dentry;
int ret;
dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
ret = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
ret = spufs_create(&path, dentry, flags, mode, neighbor);
path_put(&path);
}
return ret;
}
struct spufs_calls spufs_calls = {
.create_thread = do_spu_create,
.spu_run = do_spu_run,
.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
.notify_spus_active = do_notify_spus_active,
.owner = THIS_MODULE,
};
| gpl-2.0 |
Haxynox/kernel_samsung_n7100-old | lib/argv_split.c | 4427 | 1826 | /*
* Helper function for splitting a string into an argv-like array.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
static const char *skip_arg(const char *cp)
{
while (*cp && !isspace(*cp))
cp++;
return cp;
}
static int count_argc(const char *str)
{
int count = 0;
while (*str) {
str = skip_spaces(str);
if (*str) {
count++;
str = skip_arg(str);
}
}
return count;
}
/**
* argv_free - free an argv
* @argv - the argument vector to be freed
*
* Frees an argv and the strings it points to.
*/
void argv_free(char **argv)
{
char **p;
for (p = argv; *p; p++)
kfree(*p);
kfree(argv);
}
EXPORT_SYMBOL(argv_free);
/**
* argv_split - split a string at whitespace, returning an argv
* @gfp: the GFP mask used to allocate memory
* @str: the string to be split
* @argcp: returned argument count
*
* Returns an array of pointers to strings which are split out from
* @str. This is performed by strictly splitting on white-space; no
* quote processing is performed. Multiple whitespace characters are
* considered to be a single argument separator. The returned array
* is always NULL-terminated. Returns NULL on memory allocation
* failure.
*/
char **argv_split(gfp_t gfp, const char *str, int *argcp)
{
int argc = count_argc(str);
char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp);
char **argvp;
if (argv == NULL)
goto out;
if (argcp)
*argcp = argc;
argvp = argv;
while (*str) {
str = skip_spaces(str);
if (*str) {
const char *p = str;
char *t;
str = skip_arg(str);
t = kstrndup(p, str-p, gfp);
if (t == NULL)
goto fail;
*argvp++ = t;
}
}
*argvp = NULL;
out:
return argv;
fail:
argv_free(argv);
return NULL;
}
EXPORT_SYMBOL(argv_split);
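/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * typical in-kernel use of argv_split()/argv_free() as documented above.
 * Guarded out so it does not affect the build.
 */
#if 0
static int example_parse(const char *cmdline)
{
	char **argv;
	int argc, i;

	argv = argv_split(GFP_KERNEL, cmdline, &argc);
	if (!argv)
		return -ENOMEM;	/* allocation failure */

	for (i = 0; i < argc; i++)
		pr_info("arg %d: %s\n", i, argv[i]);

	argv_free(argv);	/* frees the array and every string in it */
	return 0;
}
#endif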
| gpl-2.0 |
NicholasFengTW/linux | sound/oss/sb_mixer.c | 4683 | 19738 | /*
* sound/oss/sb_mixer.c
*
* The low level mixer driver for the Sound Blaster compatible cards.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
*
* Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
* Rolf Fokkens (Dec 20 1998) : Moved ESS stuff into sb_ess.[ch]
* Stanislav Voronyi <stas@esc.kharkov.com> : Support for AWE 3DSE device (Jun 7 1999)
*/
#include <linux/slab.h>
#include "sound_config.h"
#define __SB_MIXER_C__
#include "sb.h"
#include "sb_mixer.h"
#include "sb_ess.h"
#define SBPRO_RECORDING_DEVICES (SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD)
/* Same as SB Pro, unless I find otherwise */
#define SGNXPRO_RECORDING_DEVICES SBPRO_RECORDING_DEVICES
#define SBPRO_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | \
SOUND_MASK_CD | SOUND_MASK_VOLUME)
/* SG NX Pro has treble and bass settings on the mixer. The 'speaker'
* channel is the COVOX/DisneySoundSource emulation volume control
* on the mixer. It does NOT control speaker volume. Should have own
* mask eventually?
*/
#define SGNXPRO_MIXER_DEVICES (SBPRO_MIXER_DEVICES|SOUND_MASK_BASS| \
SOUND_MASK_TREBLE|SOUND_MASK_SPEAKER )
#define SB16_RECORDING_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_LINE | SOUND_MASK_MIC | \
SOUND_MASK_CD)
#define SB16_OUTFILTER_DEVICES (SOUND_MASK_LINE | SOUND_MASK_MIC | \
SOUND_MASK_CD)
#define SB16_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | \
SOUND_MASK_CD | \
SOUND_MASK_IGAIN | SOUND_MASK_OGAIN | \
SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | \
SOUND_MASK_IMIX)
/* These are the only devices that are working at the moment. Others could
* be added once they are identified and a method is found to control them.
*/
#define ALS007_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_LINE | \
SOUND_MASK_PCM | SOUND_MASK_MIC | \
SOUND_MASK_CD | \
SOUND_MASK_VOLUME)
static mixer_tab sbpro_mix = {
MIX_ENT(SOUND_MIXER_VOLUME, 0x22, 7, 4, 0x22, 3, 4),
MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_SYNTH, 0x26, 7, 4, 0x26, 3, 4),
MIX_ENT(SOUND_MIXER_PCM, 0x04, 7, 4, 0x04, 3, 4),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x2e, 7, 4, 0x2e, 3, 4),
MIX_ENT(SOUND_MIXER_MIC, 0x0a, 2, 3, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_CD, 0x28, 7, 4, 0x28, 3, 4),
MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0x00, 0, 0, 0x00, 0, 0)
};
static mixer_tab sb16_mix = {
MIX_ENT(SOUND_MIXER_VOLUME, 0x30, 7, 5, 0x31, 7, 5),
MIX_ENT(SOUND_MIXER_BASS, 0x46, 7, 4, 0x47, 7, 4),
MIX_ENT(SOUND_MIXER_TREBLE, 0x44, 7, 4, 0x45, 7, 4),
MIX_ENT(SOUND_MIXER_SYNTH, 0x34, 7, 5, 0x35, 7, 5),
MIX_ENT(SOUND_MIXER_PCM, 0x32, 7, 5, 0x33, 7, 5),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x3b, 7, 2, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x38, 7, 5, 0x39, 7, 5),
MIX_ENT(SOUND_MIXER_MIC, 0x3a, 7, 5, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_CD, 0x36, 7, 5, 0x37, 7, 5),
MIX_ENT(SOUND_MIXER_IMIX, 0x3c, 0, 1, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0x3f, 7, 2, 0x40, 7, 2), /* Obsolete. Use IGAIN */
MIX_ENT(SOUND_MIXER_IGAIN, 0x3f, 7, 2, 0x40, 7, 2),
MIX_ENT(SOUND_MIXER_OGAIN, 0x41, 7, 2, 0x42, 7, 2)
};
static mixer_tab als007_mix =
{
MIX_ENT(SOUND_MIXER_VOLUME, 0x62, 7, 4, 0x62, 3, 4),
MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_SYNTH, 0x66, 7, 4, 0x66, 3, 4),
MIX_ENT(SOUND_MIXER_PCM, 0x64, 7, 4, 0x64, 3, 4),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x6e, 7, 4, 0x6e, 3, 4),
MIX_ENT(SOUND_MIXER_MIC, 0x6a, 2, 3, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_CD, 0x68, 7, 4, 0x68, 3, 4),
MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0x00, 0, 0, 0x00, 0, 0), /* Obsolete. Use IGAIN */
MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0)
};
/* With SM_GAMES the master volume is lower and the PCM & FM volumes are
   higher than with the SB Pro; this improves the sound quality. */
static int smg_default_levels[32] =
{
0x2020, /* Master Volume */
0x4b4b, /* Bass */
0x4b4b, /* Treble */
0x6464, /* FM */
0x6464, /* PCM */
0x4b4b, /* PC Speaker */
0x4b4b, /* Ext Line */
0x0000, /* Mic */
0x4b4b, /* CD */
0x4b4b, /* Recording monitor */
0x4b4b, /* SB PCM */
0x4b4b, /* Recording level */
0x4b4b, /* Input gain */
0x4b4b, /* Output gain */
0x4040, /* Line1 */
0x4040, /* Line2 */
0x1515 /* Line3 */
};
static int sb_default_levels[32] =
{
0x5a5a, /* Master Volume */
0x4b4b, /* Bass */
0x4b4b, /* Treble */
0x4b4b, /* FM */
0x4b4b, /* PCM */
0x4b4b, /* PC Speaker */
0x4b4b, /* Ext Line */
0x1010, /* Mic */
0x4b4b, /* CD */
0x0000, /* Recording monitor */
0x4b4b, /* SB PCM */
0x4b4b, /* Recording level */
0x4b4b, /* Input gain */
0x4b4b, /* Output gain */
0x4040, /* Line1 */
0x4040, /* Line2 */
0x1515 /* Line3 */
};
static unsigned char sb16_recmasks_L[SOUND_MIXER_NRDEVICES] =
{
0x00, /* SOUND_MIXER_VOLUME */
0x00, /* SOUND_MIXER_BASS */
0x00, /* SOUND_MIXER_TREBLE */
0x40, /* SOUND_MIXER_SYNTH */
0x00, /* SOUND_MIXER_PCM */
0x00, /* SOUND_MIXER_SPEAKER */
0x10, /* SOUND_MIXER_LINE */
0x01, /* SOUND_MIXER_MIC */
0x04, /* SOUND_MIXER_CD */
0x00, /* SOUND_MIXER_IMIX */
0x00, /* SOUND_MIXER_ALTPCM */
0x00, /* SOUND_MIXER_RECLEV */
0x00, /* SOUND_MIXER_IGAIN */
0x00 /* SOUND_MIXER_OGAIN */
};
static unsigned char sb16_recmasks_R[SOUND_MIXER_NRDEVICES] =
{
0x00, /* SOUND_MIXER_VOLUME */
0x00, /* SOUND_MIXER_BASS */
0x00, /* SOUND_MIXER_TREBLE */
0x20, /* SOUND_MIXER_SYNTH */
0x00, /* SOUND_MIXER_PCM */
0x00, /* SOUND_MIXER_SPEAKER */
0x08, /* SOUND_MIXER_LINE */
0x01, /* SOUND_MIXER_MIC */
0x02, /* SOUND_MIXER_CD */
0x00, /* SOUND_MIXER_IMIX */
0x00, /* SOUND_MIXER_ALTPCM */
0x00, /* SOUND_MIXER_RECLEV */
0x00, /* SOUND_MIXER_IGAIN */
0x00 /* SOUND_MIXER_OGAIN */
};
static char smw_mix_regs[] = /* Left mixer registers */
{
0x0b, /* SOUND_MIXER_VOLUME */
0x0d, /* SOUND_MIXER_BASS */
0x0d, /* SOUND_MIXER_TREBLE */
0x05, /* SOUND_MIXER_SYNTH */
0x09, /* SOUND_MIXER_PCM */
0x00, /* SOUND_MIXER_SPEAKER */
0x03, /* SOUND_MIXER_LINE */
0x01, /* SOUND_MIXER_MIC */
0x07, /* SOUND_MIXER_CD */
0x00, /* SOUND_MIXER_IMIX */
0x00, /* SOUND_MIXER_ALTPCM */
0x00, /* SOUND_MIXER_RECLEV */
0x00, /* SOUND_MIXER_IGAIN */
0x00, /* SOUND_MIXER_OGAIN */
0x00, /* SOUND_MIXER_LINE1 */
0x00, /* SOUND_MIXER_LINE2 */
0x00 /* SOUND_MIXER_LINE3 */
};
static int sbmixnum = 1;
static void sb_mixer_reset(sb_devc * devc);
void sb_mixer_set_stereo(sb_devc * devc, int mode)
{
sb_chgmixer(devc, OUT_FILTER, STEREO_DAC, (mode ? STEREO_DAC : MONO_DAC));
}
static int detect_mixer(sb_devc * devc)
{
/* Just trust the mixer is there */
return 1;
}
static void oss_change_bits(sb_devc *devc, unsigned char *regval, int dev, int chn, int newval)
{
unsigned char mask;
int shift;
mask = (1 << (*devc->iomap)[dev][chn].nbits) - 1;
newval = (int) ((newval * mask) + 50) / 100; /* Scale */
shift = (*devc->iomap)[dev][chn].bitoffs - (*devc->iomap)[dev][LEFT_CHN].nbits + 1;
*regval &= ~(mask << shift); /* Mask out previous value */
*regval |= (newval & mask) << shift; /* Set the new value */
}
static int sb_mixer_get(sb_devc * devc, int dev)
{
if (!((1 << dev) & devc->supported_devices))
return -EINVAL;
return devc->levels[dev];
}
void smw_mixer_init(sb_devc * devc)
{
int i;
sb_setmixer(devc, 0x00, 0x18); /* Mute unused (Telephone) line */
sb_setmixer(devc, 0x10, 0x38); /* Config register 2 */
devc->supported_devices = 0;
for (i = 0; i < sizeof(smw_mix_regs); i++)
if (smw_mix_regs[i] != 0)
devc->supported_devices |= (1 << i);
devc->supported_rec_devices = devc->supported_devices &
~(SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_PCM | SOUND_MASK_VOLUME);
sb_mixer_reset(devc);
}
int sb_common_mixer_set(sb_devc * devc, int dev, int left, int right)
{
int regoffs;
unsigned char val;
if ((dev < 0) || (dev >= devc->iomap_sz))
return -EINVAL;
regoffs = (*devc->iomap)[dev][LEFT_CHN].regno;
if (regoffs == 0)
return -EINVAL;
val = sb_getmixer(devc, regoffs);
oss_change_bits(devc, &val, dev, LEFT_CHN, left);
if ((*devc->iomap)[dev][RIGHT_CHN].regno != regoffs) /*
* Change register
*/
{
sb_setmixer(devc, regoffs, val); /*
* Save the old one
*/
regoffs = (*devc->iomap)[dev][RIGHT_CHN].regno;
if (regoffs == 0)
return left | (left << 8); /*
* Just left channel present
*/
val = sb_getmixer(devc, regoffs); /*
* Read the new one
*/
}
oss_change_bits(devc, &val, dev, RIGHT_CHN, right);
sb_setmixer(devc, regoffs, val);
return left | (right << 8);
}
static int smw_mixer_set(sb_devc * devc, int dev, int left, int right)
{
int reg, val;
switch (dev)
{
case SOUND_MIXER_VOLUME:
sb_setmixer(devc, 0x0b, 96 - (96 * left / 100)); /* 96=mute, 0=max */
sb_setmixer(devc, 0x0c, 96 - (96 * right / 100));
break;
case SOUND_MIXER_BASS:
case SOUND_MIXER_TREBLE:
devc->levels[dev] = left | (right << 8);
/* Set left bass and treble values */
val = ((devc->levels[SOUND_MIXER_TREBLE] & 0xff) * 16 / (unsigned) 100) << 4;
val |= ((devc->levels[SOUND_MIXER_BASS] & 0xff) * 16 / (unsigned) 100) & 0x0f;
sb_setmixer(devc, 0x0d, val);
/* Set right bass and treble values */
val = (((devc->levels[SOUND_MIXER_TREBLE] >> 8) & 0xff) * 16 / (unsigned) 100) << 4;
val |= (((devc->levels[SOUND_MIXER_BASS] >> 8) & 0xff) * 16 / (unsigned) 100) & 0x0f;
sb_setmixer(devc, 0x0e, val);
break;
default:
/* bounds check */
if (dev < 0 || dev >= ARRAY_SIZE(smw_mix_regs))
return -EINVAL;
reg = smw_mix_regs[dev];
if (reg == 0)
return -EINVAL;
sb_setmixer(devc, reg, (24 - (24 * left / 100)) | 0x20); /* 24=mute, 0=max */
sb_setmixer(devc, reg + 1, (24 - (24 * right / 100)) | 0x40);
}
devc->levels[dev] = left | (right << 8);
return left | (right << 8);
}
static int sb_mixer_set(sb_devc * devc, int dev, int value)
{
int left = value & 0x000000ff;
int right = (value & 0x0000ff00) >> 8;
int retval;
if (left > 100)
left = 100;
if (right > 100)
right = 100;
if ((dev < 0) || (dev > 31))
return -EINVAL;
if (!(devc->supported_devices & (1 << dev))) /*
* Not supported
*/
return -EINVAL;
/* Differentiate depending on the chipsets */
switch (devc->model) {
case MDL_SMW:
retval = smw_mixer_set(devc, dev, left, right);
break;
case MDL_ESS:
retval = ess_mixer_set(devc, dev, left, right);
break;
default:
retval = sb_common_mixer_set(devc, dev, left, right);
}
if (retval >= 0) devc->levels[dev] = retval;
return retval;
}
/*
* set_recsrc doesn't apply to ES188x
*/
static void set_recsrc(sb_devc * devc, int src)
{
sb_setmixer(devc, RECORD_SRC, (sb_getmixer(devc, RECORD_SRC) & ~7) | (src & 0x7));
}
static int set_recmask(sb_devc * devc, int mask)
{
int devmask, i;
unsigned char regimageL, regimageR;
devmask = mask & devc->supported_rec_devices;
switch (devc->model)
{
case MDL_SBPRO:
case MDL_ESS:
case MDL_JAZZ:
case MDL_SMW:
if (devc->model == MDL_ESS && ess_set_recmask (devc, &devmask)) {
break;
}
if (devmask != SOUND_MASK_MIC &&
devmask != SOUND_MASK_LINE &&
devmask != SOUND_MASK_CD)
{
/*
* More than one device selected. Drop the
* previous selection
*/
devmask &= ~devc->recmask;
}
if (devmask != SOUND_MASK_MIC &&
devmask != SOUND_MASK_LINE &&
devmask != SOUND_MASK_CD)
{
/*
* More than one device selected. Default to
* mic
*/
devmask = SOUND_MASK_MIC;
}
if (devmask ^ devc->recmask) /*
* Input source changed
*/
{
switch (devmask)
{
case SOUND_MASK_MIC:
set_recsrc(devc, SRC__MIC);
break;
case SOUND_MASK_LINE:
set_recsrc(devc, SRC__LINE);
break;
case SOUND_MASK_CD:
set_recsrc(devc, SRC__CD);
break;
default:
set_recsrc(devc, SRC__MIC);
}
}
break;
case MDL_SB16:
if (!devmask)
devmask = SOUND_MASK_MIC;
if (devc->submodel == SUBMDL_ALS007)
{
switch (devmask)
{
case SOUND_MASK_LINE:
sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_LINE);
break;
case SOUND_MASK_CD:
sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_CD);
break;
case SOUND_MASK_SYNTH:
sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_SYNTH);
break;
default: /* Also takes care of SOUND_MASK_MIC case */
sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_MIC);
break;
}
}
else
{
regimageL = regimageR = 0;
for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
{
if ((1 << i) & devmask)
{
regimageL |= sb16_recmasks_L[i];
regimageR |= sb16_recmasks_R[i];
}
sb_setmixer (devc, SB16_IMASK_L, regimageL);
sb_setmixer (devc, SB16_IMASK_R, regimageR);
}
}
break;
}
devc->recmask = devmask;
return devc->recmask;
}
static int set_outmask(sb_devc * devc, int mask)
{
int devmask, i;
unsigned char regimage;
devmask = mask & devc->supported_out_devices;
switch (devc->model)
{
case MDL_SB16:
if (devc->submodel == SUBMDL_ALS007)
break;
else
{
regimage = 0;
for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
{
if ((1 << i) & devmask)
{
regimage |= (sb16_recmasks_L[i] | sb16_recmasks_R[i]);
}
sb_setmixer (devc, SB16_OMASK, regimage);
}
}
break;
default:
break;
}
devc->outmask = devmask;
return devc->outmask;
}
static int sb_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
{
sb_devc *devc = mixer_devs[dev]->devc;
int val, ret;
int __user *p = arg;
/*
* Use ioctl(fd, SOUND_MIXER_AGC, &mode) to turn AGC off (0) or on (1).
* Use ioctl(fd, SOUND_MIXER_3DSE, &mode) to turn 3DSE off (0) or on (1),
* or pass mode==2 to read the current 3DSE state back into mode.
*/
if (devc->model == MDL_SB16) {
if (cmd == SOUND_MIXER_AGC)
{
if (get_user(val, p))
return -EFAULT;
sb_setmixer(devc, 0x43, (~val) & 0x01);
return 0;
}
if (cmd == SOUND_MIXER_3DSE)
{
/* The cutoff at minor version 15 is a guess; at least DSP version
   4.13 doesn't have 3DSE, while 4.16 does. */
if (devc->minor < 15)
return -EINVAL;
if (get_user(val, p))
return -EFAULT;
if (val == 0 || val == 1)
sb_chgmixer(devc, AWE_3DSE, 0x01, val);
else if (val == 2)
{
ret = sb_getmixer(devc, AWE_3DSE)&0x01;
return put_user(ret, p);
}
else
return -EINVAL;
return 0;
}
}
if (((cmd >> 8) & 0xff) == 'M')
{
if (_SIOC_DIR(cmd) & _SIOC_WRITE)
{
if (get_user(val, p))
return -EFAULT;
switch (cmd & 0xff)
{
case SOUND_MIXER_RECSRC:
ret = set_recmask(devc, val);
break;
case SOUND_MIXER_OUTSRC:
ret = set_outmask(devc, val);
break;
default:
ret = sb_mixer_set(devc, cmd & 0xff, val);
}
}
else switch (cmd & 0xff)
{
case SOUND_MIXER_RECSRC:
ret = devc->recmask;
break;
case SOUND_MIXER_OUTSRC:
ret = devc->outmask;
break;
case SOUND_MIXER_DEVMASK:
ret = devc->supported_devices;
break;
case SOUND_MIXER_STEREODEVS:
ret = devc->supported_devices;
/* The ESS seems to have stereo mic controls */
if (devc->model == MDL_ESS)
ret &= ~(SOUND_MASK_SPEAKER|SOUND_MASK_IMIX);
else if (devc->model != MDL_JAZZ && devc->model != MDL_SMW)
ret &= ~(SOUND_MASK_MIC | SOUND_MASK_SPEAKER | SOUND_MASK_IMIX);
break;
case SOUND_MIXER_RECMASK:
ret = devc->supported_rec_devices;
break;
case SOUND_MIXER_OUTMASK:
ret = devc->supported_out_devices;
break;
case SOUND_MIXER_CAPS:
ret = devc->mixer_caps;
break;
default:
ret = sb_mixer_get(devc, cmd & 0xff);
break;
}
return put_user(ret, p);
} else
return -EINVAL;
}
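/*
 * Illustrative user-space sketch of the AGC/3DSE ioctls documented at the
 * top of sb_mixer_ioctl(); not part of the original file and compiled out.
 * These requests are only honoured on SB16-class hardware, the mixer
 * device path is an assumption, and SOUND_MIXER_3DSE is expected to come
 * from the OSS soundcard.h header.
 */
#if 0
#include <sys/soundcard.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int example_toggle_3dse(int enable)
{
	int fd = open("/dev/mixer", O_RDWR);
	int mode;

	if (fd < 0)
		return -1;
	mode = enable ? 1 : 0;		/* 0 = off, 1 = on */
	if (ioctl(fd, SOUND_MIXER_3DSE, &mode) < 0)
		goto fail;
	mode = 2;			/* 2 = read current state back into mode */
	if (ioctl(fd, SOUND_MIXER_3DSE, &mode) < 0)
		goto fail;
	close(fd);
	return mode;			/* 0 or 1 after the read-back */
fail:
	close(fd);
	return -1;
}
#endif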
static struct mixer_operations sb_mixer_operations =
{
.owner = THIS_MODULE,
.id = "SB",
.name = "Sound Blaster",
.ioctl = sb_mixer_ioctl
};
static struct mixer_operations als007_mixer_operations =
{
.owner = THIS_MODULE,
.id = "ALS007",
.name = "Avance ALS-007",
.ioctl = sb_mixer_ioctl
};
static void sb_mixer_reset(sb_devc * devc)
{
char name[32];
int i;
sprintf(name, "SB_%d", devc->sbmixnum);
if (devc->sbmo.sm_games)
devc->levels = load_mixer_volumes(name, smg_default_levels, 1);
else
devc->levels = load_mixer_volumes(name, sb_default_levels, 1);
for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
sb_mixer_set(devc, i, devc->levels[i]);
if (devc->model != MDL_ESS || !ess_mixer_reset (devc)) {
set_recmask(devc, SOUND_MASK_MIC);
}
}
int sb_mixer_init(sb_devc * devc, struct module *owner)
{
int mixer_type = 0;
int m;
devc->sbmixnum = sbmixnum++;
devc->levels = NULL;
sb_setmixer(devc, 0x00, 0); /* Reset mixer */
if (!(mixer_type = detect_mixer(devc)))
return 0; /* No mixer. Why? */
switch (devc->model)
{
case MDL_ESSPCI:
case MDL_YMPCI:
case MDL_SBPRO:
case MDL_AZTECH:
case MDL_JAZZ:
devc->mixer_caps = SOUND_CAP_EXCL_INPUT;
devc->supported_devices = SBPRO_MIXER_DEVICES;
devc->supported_rec_devices = SBPRO_RECORDING_DEVICES;
devc->iomap = &sbpro_mix;
devc->iomap_sz = ARRAY_SIZE(sbpro_mix);
break;
case MDL_ESS:
ess_mixer_init (devc);
break;
case MDL_SMW:
devc->mixer_caps = SOUND_CAP_EXCL_INPUT;
devc->supported_devices = 0;
devc->supported_rec_devices = 0;
devc->iomap = &sbpro_mix;
devc->iomap_sz = ARRAY_SIZE(sbpro_mix);
smw_mixer_init(devc);
break;
case MDL_SB16:
devc->mixer_caps = 0;
devc->supported_rec_devices = SB16_RECORDING_DEVICES;
devc->supported_out_devices = SB16_OUTFILTER_DEVICES;
if (devc->submodel != SUBMDL_ALS007)
{
devc->supported_devices = SB16_MIXER_DEVICES;
devc->iomap = &sb16_mix;
devc->iomap_sz = ARRAY_SIZE(sb16_mix);
}
else
{
devc->supported_devices = ALS007_MIXER_DEVICES;
devc->iomap = &als007_mix;
devc->iomap_sz = ARRAY_SIZE(als007_mix);
}
break;
default:
printk(KERN_WARNING "sb_mixer: Unsupported mixer type %d\n", devc->model);
return 0;
}
m = sound_alloc_mixerdev();
if (m == -1)
return 0;
mixer_devs[m] = kmalloc(sizeof(struct mixer_operations), GFP_KERNEL);
if (mixer_devs[m] == NULL)
{
printk(KERN_ERR "sb_mixer: Can't allocate memory\n");
sound_unload_mixerdev(m);
return 0;
}
if (devc->submodel != SUBMDL_ALS007)
memcpy ((char *) mixer_devs[m], (char *) &sb_mixer_operations, sizeof (struct mixer_operations));
else
memcpy ((char *) mixer_devs[m], (char *) &als007_mixer_operations, sizeof (struct mixer_operations));
mixer_devs[m]->devc = devc;
if (owner)
mixer_devs[m]->owner = owner;
devc->my_mixerdev = m;
sb_mixer_reset(devc);
return 1;
}
void sb_mixer_unload(sb_devc *devc)
{
if (devc->my_mixerdev == -1)
return;
kfree(mixer_devs[devc->my_mixerdev]);
sound_unload_mixerdev(devc->my_mixerdev);
sbmixnum--;
}
| gpl-2.0 |
dsb9938/DNA_JB_KERNEL_2 | sound/drivers/dummy.c | 4939 | 31003 | /*
* Dummy soundcard
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/hrtimer.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Dummy soundcard (/dev/null)");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{ALSA,Dummy soundcard}}");
#define MAX_PCM_DEVICES 4
#define MAX_PCM_SUBSTREAMS 128
#define MAX_MIDI_DEVICES 2
/* defaults */
#define MAX_BUFFER_SIZE (64*1024)
#define MIN_PERIOD_SIZE 64
#define MAX_PERIOD_SIZE MAX_BUFFER_SIZE
#define USE_FORMATS (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE)
#define USE_RATE SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000
#define USE_RATE_MIN 5500
#define USE_RATE_MAX 48000
#define USE_CHANNELS_MIN 1
#define USE_CHANNELS_MAX 2
#define USE_PERIODS_MIN 1
#define USE_PERIODS_MAX 1024
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0};
static char *model[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = NULL};
static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8};
//static int midi_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
#ifdef CONFIG_HIGH_RES_TIMERS
static bool hrtimer = 1;
#endif
static bool fake_buffer = 1;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for dummy soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for dummy soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable this dummy soundcard.");
module_param_array(model, charp, NULL, 0444);
MODULE_PARM_DESC(model, "Soundcard model.");
module_param_array(pcm_devs, int, NULL, 0444);
MODULE_PARM_DESC(pcm_devs, "PCM devices # (0-4) for dummy driver.");
module_param_array(pcm_substreams, int, NULL, 0444);
MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
//module_param_array(midi_devs, int, NULL, 0444);
//MODULE_PARM_DESC(midi_devs, "MIDI devices # (0-2) for dummy driver.");
module_param(fake_buffer, bool, 0444);
MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
#ifdef CONFIG_HIGH_RES_TIMERS
module_param(hrtimer, bool, 0644);
MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
#endif
static struct platform_device *devices[SNDRV_CARDS];
#define MIXER_ADDR_MASTER 0
#define MIXER_ADDR_LINE 1
#define MIXER_ADDR_MIC 2
#define MIXER_ADDR_SYNTH 3
#define MIXER_ADDR_CD 4
#define MIXER_ADDR_LAST 4
struct dummy_timer_ops {
int (*create)(struct snd_pcm_substream *);
void (*free)(struct snd_pcm_substream *);
int (*prepare)(struct snd_pcm_substream *);
int (*start)(struct snd_pcm_substream *);
int (*stop)(struct snd_pcm_substream *);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
};
struct dummy_model {
const char *name;
int (*playback_constraints)(struct snd_pcm_runtime *runtime);
int (*capture_constraints)(struct snd_pcm_runtime *runtime);
u64 formats;
size_t buffer_bytes_max;
size_t period_bytes_min;
size_t period_bytes_max;
unsigned int periods_min;
unsigned int periods_max;
unsigned int rates;
unsigned int rate_min;
unsigned int rate_max;
unsigned int channels_min;
unsigned int channels_max;
};
struct snd_dummy {
struct snd_card *card;
struct dummy_model *model;
struct snd_pcm *pcm;
struct snd_pcm_hardware pcm_hw;
spinlock_t mixer_lock;
int mixer_volume[MIXER_ADDR_LAST+1][2];
int capture_source[MIXER_ADDR_LAST+1][2];
const struct dummy_timer_ops *timer_ops;
};
/*
* card models
*/
static int emu10k1_playback_constraints(struct snd_pcm_runtime *runtime)
{
int err;
err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256, UINT_MAX);
if (err < 0)
return err;
return 0;
}
struct dummy_model model_emu10k1 = {
.name = "emu10k1",
.playback_constraints = emu10k1_playback_constraints,
.buffer_bytes_max = 128 * 1024,
};
struct dummy_model model_rme9652 = {
.name = "rme9652",
.buffer_bytes_max = 26 * 64 * 1024,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 26,
.channels_max = 26,
.periods_min = 2,
.periods_max = 2,
};
struct dummy_model model_ice1712 = {
.name = "ice1712",
.buffer_bytes_max = 256 * 1024,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 10,
.channels_max = 10,
.periods_min = 1,
.periods_max = 1024,
};
struct dummy_model model_uda1341 = {
.name = "uda1341",
.buffer_bytes_max = 16380,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 2,
.channels_max = 2,
.periods_min = 2,
.periods_max = 255,
};
struct dummy_model model_ac97 = {
.name = "ac97",
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
};
struct dummy_model model_ca0106 = {
.name = "ca0106",
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.buffer_bytes_max = ((65536-64)*8),
.period_bytes_max = (65536-64),
.periods_min = 2,
.periods_max = 8,
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_48000|SNDRV_PCM_RATE_96000|SNDRV_PCM_RATE_192000,
.rate_min = 48000,
.rate_max = 192000,
};
struct dummy_model *dummy_models[] = {
&model_emu10k1,
&model_rme9652,
&model_ice1712,
&model_uda1341,
&model_ac97,
&model_ca0106,
NULL
};
/*
* system timer interface
*/
struct dummy_systimer_pcm {
spinlock_t lock;
struct timer_list timer;
unsigned long base_time;
unsigned int frac_pos; /* fractional sample position (scaled by HZ) */
unsigned int frac_period_rest;
unsigned int frac_buffer_size; /* buffer_size * HZ */
unsigned int frac_period_size; /* period_size * HZ */
unsigned int rate;
int elapsed;
struct snd_pcm_substream *substream;
};
static void dummy_systimer_rearm(struct dummy_systimer_pcm *dpcm)
{
dpcm->timer.expires = jiffies +
(dpcm->frac_period_rest + dpcm->rate - 1) / dpcm->rate;
add_timer(&dpcm->timer);
}
static void dummy_systimer_update(struct dummy_systimer_pcm *dpcm)
{
unsigned long delta;
delta = jiffies - dpcm->base_time;
if (!delta)
return;
dpcm->base_time += delta;
delta *= dpcm->rate;
dpcm->frac_pos += delta;
while (dpcm->frac_pos >= dpcm->frac_buffer_size)
dpcm->frac_pos -= dpcm->frac_buffer_size;
while (dpcm->frac_period_rest <= delta) {
dpcm->elapsed++;
dpcm->frac_period_rest += dpcm->frac_period_size;
}
dpcm->frac_period_rest -= delta;
}
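/*
 * The frac_* fields above are fixed-point positions scaled by HZ: every
 * elapsed jiffy makes dummy_systimer_update() advance frac_pos by exactly
 * "rate". With rate = 48000 and HZ = 1000, for example, one tick adds
 * 48000, which dummy_systimer_pointer() later divides by HZ back down to
 * 48 frames (1 ms of audio at 48 kHz).
 */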
static int dummy_systimer_start(struct snd_pcm_substream *substream)
{
struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
spin_lock(&dpcm->lock);
dpcm->base_time = jiffies;
dummy_systimer_rearm(dpcm);
spin_unlock(&dpcm->lock);
return 0;
}
static int dummy_systimer_stop(struct snd_pcm_substream *substream)
{
struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
spin_lock(&dpcm->lock);
del_timer(&dpcm->timer);
spin_unlock(&dpcm->lock);
return 0;
}
static int dummy_systimer_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dummy_systimer_pcm *dpcm = runtime->private_data;
dpcm->frac_pos = 0;
dpcm->rate = runtime->rate;
dpcm->frac_buffer_size = runtime->buffer_size * HZ;
dpcm->frac_period_size = runtime->period_size * HZ;
dpcm->frac_period_rest = dpcm->frac_period_size;
dpcm->elapsed = 0;
return 0;
}
static void dummy_systimer_callback(unsigned long data)
{
struct dummy_systimer_pcm *dpcm = (struct dummy_systimer_pcm *)data;
unsigned long flags;
int elapsed = 0;
spin_lock_irqsave(&dpcm->lock, flags);
dummy_systimer_update(dpcm);
dummy_systimer_rearm(dpcm);
elapsed = dpcm->elapsed;
dpcm->elapsed = 0;
spin_unlock_irqrestore(&dpcm->lock, flags);
if (elapsed)
snd_pcm_period_elapsed(dpcm->substream);
}
static snd_pcm_uframes_t
dummy_systimer_pointer(struct snd_pcm_substream *substream)
{
struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
snd_pcm_uframes_t pos;
spin_lock(&dpcm->lock);
dummy_systimer_update(dpcm);
pos = dpcm->frac_pos / HZ;
spin_unlock(&dpcm->lock);
return pos;
}
static int dummy_systimer_create(struct snd_pcm_substream *substream)
{
struct dummy_systimer_pcm *dpcm;
dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
if (!dpcm)
return -ENOMEM;
substream->runtime->private_data = dpcm;
init_timer(&dpcm->timer);
dpcm->timer.data = (unsigned long) dpcm;
dpcm->timer.function = dummy_systimer_callback;
spin_lock_init(&dpcm->lock);
dpcm->substream = substream;
return 0;
}
static void dummy_systimer_free(struct snd_pcm_substream *substream)
{
kfree(substream->runtime->private_data);
}
static struct dummy_timer_ops dummy_systimer_ops = {
.create = dummy_systimer_create,
.free = dummy_systimer_free,
.prepare = dummy_systimer_prepare,
.start = dummy_systimer_start,
.stop = dummy_systimer_stop,
.pointer = dummy_systimer_pointer,
};
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* hrtimer interface
*/
struct dummy_hrtimer_pcm {
ktime_t base_time;
ktime_t period_time;
atomic_t running;
struct hrtimer timer;
struct tasklet_struct tasklet;
struct snd_pcm_substream *substream;
};
static void dummy_hrtimer_pcm_elapsed(unsigned long priv)
{
struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv;
if (atomic_read(&dpcm->running))
snd_pcm_period_elapsed(dpcm->substream);
}
static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
{
struct dummy_hrtimer_pcm *dpcm;
dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
if (!atomic_read(&dpcm->running))
return HRTIMER_NORESTART;
tasklet_schedule(&dpcm->tasklet);
hrtimer_forward_now(timer, dpcm->period_time);
return HRTIMER_RESTART;
}
static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
{
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL);
atomic_set(&dpcm->running, 1);
return 0;
}
static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
{
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
atomic_set(&dpcm->running, 0);
hrtimer_cancel(&dpcm->timer);
return 0;
}
static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
{
tasklet_kill(&dpcm->tasklet);
}
static snd_pcm_uframes_t
dummy_hrtimer_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
u64 delta;
u32 pos;
delta = ktime_us_delta(hrtimer_cb_get_time(&dpcm->timer),
dpcm->base_time);
delta = div_u64(delta * runtime->rate + 999999, 1000000);
div_u64_rem(delta, runtime->buffer_size, &pos);
return pos;
}
static int dummy_hrtimer_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
unsigned int period, rate;
long sec;
unsigned long nsecs;
dummy_hrtimer_sync(dpcm);
period = runtime->period_size;
rate = runtime->rate;
sec = period / rate;
period %= rate;
nsecs = div_u64((u64)period * 1000000000UL + rate - 1, rate);
dpcm->period_time = ktime_set(sec, nsecs);
return 0;
}
static int dummy_hrtimer_create(struct snd_pcm_substream *substream)
{
struct dummy_hrtimer_pcm *dpcm;
dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
if (!dpcm)
return -ENOMEM;
substream->runtime->private_data = dpcm;
hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
dpcm->timer.function = dummy_hrtimer_callback;
dpcm->substream = substream;
atomic_set(&dpcm->running, 0);
tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed,
(unsigned long)dpcm);
return 0;
}
static void dummy_hrtimer_free(struct snd_pcm_substream *substream)
{
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
dummy_hrtimer_sync(dpcm);
kfree(dpcm);
}
static struct dummy_timer_ops dummy_hrtimer_ops = {
.create = dummy_hrtimer_create,
.free = dummy_hrtimer_free,
.prepare = dummy_hrtimer_prepare,
.start = dummy_hrtimer_start,
.stop = dummy_hrtimer_stop,
.pointer = dummy_hrtimer_pointer,
};
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
* PCM interface
*/
static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
return dummy->timer_ops->start(substream);
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
return dummy->timer_ops->stop(substream);
}
return -EINVAL;
}
static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
return dummy->timer_ops->prepare(substream);
}
static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
return dummy->timer_ops->pointer(substream);
}
static struct snd_pcm_hardware dummy_pcm_hardware = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_MMAP_VALID),
.formats = USE_FORMATS,
.rates = USE_RATE,
.rate_min = USE_RATE_MIN,
.rate_max = USE_RATE_MAX,
.channels_min = USE_CHANNELS_MIN,
.channels_max = USE_CHANNELS_MAX,
.buffer_bytes_max = MAX_BUFFER_SIZE,
.period_bytes_min = MIN_PERIOD_SIZE,
.period_bytes_max = MAX_PERIOD_SIZE,
.periods_min = USE_PERIODS_MIN,
.periods_max = USE_PERIODS_MAX,
.fifo_size = 0,
};
static int dummy_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
if (fake_buffer) {
/* runtime->dma_bytes has to be set manually to allow mmap */
substream->runtime->dma_bytes = params_buffer_bytes(hw_params);
return 0;
}
return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
}
static int dummy_pcm_hw_free(struct snd_pcm_substream *substream)
{
if (fake_buffer)
return 0;
return snd_pcm_lib_free_pages(substream);
}
static int dummy_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
struct dummy_model *model = dummy->model;
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
dummy->timer_ops = &dummy_systimer_ops;
#ifdef CONFIG_HIGH_RES_TIMERS
if (hrtimer)
dummy->timer_ops = &dummy_hrtimer_ops;
#endif
err = dummy->timer_ops->create(substream);
if (err < 0)
return err;
runtime->hw = dummy->pcm_hw;
if (substream->pcm->device & 1) {
runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED;
runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED;
}
if (substream->pcm->device & 2)
runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID);
if (model == NULL)
return 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (model->playback_constraints)
err = model->playback_constraints(substream->runtime);
} else {
if (model->capture_constraints)
err = model->capture_constraints(substream->runtime);
}
if (err < 0) {
dummy->timer_ops->free(substream);
return err;
}
return 0;
}
static int dummy_pcm_close(struct snd_pcm_substream *substream)
{
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
dummy->timer_ops->free(substream);
return 0;
}
/*
* dummy buffer handling
*/
static void *dummy_page[2];
static void free_fake_buffer(void)
{
if (fake_buffer) {
int i;
for (i = 0; i < 2; i++)
if (dummy_page[i]) {
free_page((unsigned long)dummy_page[i]);
dummy_page[i] = NULL;
}
}
}
static int alloc_fake_buffer(void)
{
int i;
if (!fake_buffer)
return 0;
for (i = 0; i < 2; i++) {
dummy_page[i] = (void *)get_zeroed_page(GFP_KERNEL);
if (!dummy_page[i]) {
free_fake_buffer();
return -ENOMEM;
}
}
return 0;
}
static int dummy_pcm_copy(struct snd_pcm_substream *substream,
int channel, snd_pcm_uframes_t pos,
void __user *dst, snd_pcm_uframes_t count)
{
return 0; /* do nothing */
}
static int dummy_pcm_silence(struct snd_pcm_substream *substream,
int channel, snd_pcm_uframes_t pos,
snd_pcm_uframes_t count)
{
return 0; /* do nothing */
}
static struct page *dummy_pcm_page(struct snd_pcm_substream *substream,
unsigned long offset)
{
return virt_to_page(dummy_page[substream->stream]); /* the same page */
}
static struct snd_pcm_ops dummy_pcm_ops = {
.open = dummy_pcm_open,
.close = dummy_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = dummy_pcm_hw_params,
.hw_free = dummy_pcm_hw_free,
.prepare = dummy_pcm_prepare,
.trigger = dummy_pcm_trigger,
.pointer = dummy_pcm_pointer,
};
static struct snd_pcm_ops dummy_pcm_ops_no_buf = {
.open = dummy_pcm_open,
.close = dummy_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = dummy_pcm_hw_params,
.hw_free = dummy_pcm_hw_free,
.prepare = dummy_pcm_prepare,
.trigger = dummy_pcm_trigger,
.pointer = dummy_pcm_pointer,
.copy = dummy_pcm_copy,
.silence = dummy_pcm_silence,
.page = dummy_pcm_page,
};
static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
int substreams)
{
struct snd_pcm *pcm;
struct snd_pcm_ops *ops;
int err;
err = snd_pcm_new(dummy->card, "Dummy PCM", device,
substreams, substreams, &pcm);
if (err < 0)
return err;
dummy->pcm = pcm;
if (fake_buffer)
ops = &dummy_pcm_ops_no_buf;
else
ops = &dummy_pcm_ops;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ops);
pcm->private_data = dummy;
pcm->info_flags = 0;
strcpy(pcm->name, "Dummy PCM");
if (!fake_buffer) {
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL),
0, 64*1024);
}
return 0;
}
/*
* mixer interface
*/
#define DUMMY_VOLUME(xname, xindex, addr) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
.name = xname, .index = xindex, \
.info = snd_dummy_volume_info, \
.get = snd_dummy_volume_get, .put = snd_dummy_volume_put, \
.private_value = addr, \
.tlv = { .p = db_scale_dummy } }
static int snd_dummy_volume_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = -50;
uinfo->value.integer.max = 100;
return 0;
}
static int snd_dummy_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol);
int addr = kcontrol->private_value;
spin_lock_irq(&dummy->mixer_lock);
ucontrol->value.integer.value[0] = dummy->mixer_volume[addr][0];
ucontrol->value.integer.value[1] = dummy->mixer_volume[addr][1];
spin_unlock_irq(&dummy->mixer_lock);
return 0;
}
static int snd_dummy_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol);
int change, addr = kcontrol->private_value;
int left, right;
left = ucontrol->value.integer.value[0];
if (left < -50)
left = -50;
if (left > 100)
left = 100;
right = ucontrol->value.integer.value[1];
if (right < -50)
right = -50;
if (right > 100)
right = 100;
spin_lock_irq(&dummy->mixer_lock);
change = dummy->mixer_volume[addr][0] != left ||
dummy->mixer_volume[addr][1] != right;
dummy->mixer_volume[addr][0] = left;
dummy->mixer_volume[addr][1] = right;
spin_unlock_irq(&dummy->mixer_lock);
return change;
}
static const DECLARE_TLV_DB_SCALE(db_scale_dummy, -4500, 30, 0);
#define DUMMY_CAPSRC(xname, xindex, addr) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
.info = snd_dummy_capsrc_info, \
.get = snd_dummy_capsrc_get, .put = snd_dummy_capsrc_put, \
.private_value = addr }
#define snd_dummy_capsrc_info snd_ctl_boolean_stereo_info
static int snd_dummy_capsrc_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol);
int addr = kcontrol->private_value;
spin_lock_irq(&dummy->mixer_lock);
ucontrol->value.integer.value[0] = dummy->capture_source[addr][0];
ucontrol->value.integer.value[1] = dummy->capture_source[addr][1];
spin_unlock_irq(&dummy->mixer_lock);
return 0;
}
static int snd_dummy_capsrc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol);
int change, addr = kcontrol->private_value;
int left, right;
left = ucontrol->value.integer.value[0] & 1;
right = ucontrol->value.integer.value[1] & 1;
spin_lock_irq(&dummy->mixer_lock);
change = dummy->capture_source[addr][0] != left ||
	dummy->capture_source[addr][1] != right;	/* a change on either channel counts */
dummy->capture_source[addr][0] = left;
dummy->capture_source[addr][1] = right;
spin_unlock_irq(&dummy->mixer_lock);
return change;
}
static struct snd_kcontrol_new snd_dummy_controls[] = {
DUMMY_VOLUME("Master Volume", 0, MIXER_ADDR_MASTER),
DUMMY_CAPSRC("Master Capture Switch", 0, MIXER_ADDR_MASTER),
DUMMY_VOLUME("Synth Volume", 0, MIXER_ADDR_SYNTH),
DUMMY_CAPSRC("Synth Capture Switch", 0, MIXER_ADDR_SYNTH),
DUMMY_VOLUME("Line Volume", 0, MIXER_ADDR_LINE),
DUMMY_CAPSRC("Line Capture Switch", 0, MIXER_ADDR_LINE),
DUMMY_VOLUME("Mic Volume", 0, MIXER_ADDR_MIC),
DUMMY_CAPSRC("Mic Capture Switch", 0, MIXER_ADDR_MIC),
DUMMY_VOLUME("CD Volume", 0, MIXER_ADDR_CD),
DUMMY_CAPSRC("CD Capture Switch", 0, MIXER_ADDR_CD)
};
static int __devinit snd_card_dummy_new_mixer(struct snd_dummy *dummy)
{
struct snd_card *card = dummy->card;
unsigned int idx;
int err;
spin_lock_init(&dummy->mixer_lock);
strcpy(card->mixername, "Dummy Mixer");
for (idx = 0; idx < ARRAY_SIZE(snd_dummy_controls); idx++) {
err = snd_ctl_add(card, snd_ctl_new1(&snd_dummy_controls[idx], dummy));
if (err < 0)
return err;
}
return 0;
}
#if defined(CONFIG_SND_DEBUG) && defined(CONFIG_PROC_FS)
/*
* proc interface
*/
static void print_formats(struct snd_dummy *dummy,
struct snd_info_buffer *buffer)
{
int i;
for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
if (dummy->pcm_hw.formats & (1ULL << i))
snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
}
}
static void print_rates(struct snd_dummy *dummy,
struct snd_info_buffer *buffer)
{
static int rates[] = {
5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000,
64000, 88200, 96000, 176400, 192000,
};
int i;
if (dummy->pcm_hw.rates & SNDRV_PCM_RATE_CONTINUOUS)
snd_iprintf(buffer, " continuous");
if (dummy->pcm_hw.rates & SNDRV_PCM_RATE_KNOT)
snd_iprintf(buffer, " knot");
for (i = 0; i < ARRAY_SIZE(rates); i++)
if (dummy->pcm_hw.rates & (1 << i))
snd_iprintf(buffer, " %d", rates[i]);
}
#define get_dummy_int_ptr(dummy, ofs) \
(unsigned int *)((char *)&((dummy)->pcm_hw) + (ofs))
#define get_dummy_ll_ptr(dummy, ofs) \
(unsigned long long *)((char *)&((dummy)->pcm_hw) + (ofs))
struct dummy_hw_field {
const char *name;
const char *format;
unsigned int offset;
unsigned int size;
};
#define FIELD_ENTRY(item, fmt) { \
.name = #item, \
.format = fmt, \
.offset = offsetof(struct snd_pcm_hardware, item), \
.size = sizeof(dummy_pcm_hardware.item) }
static struct dummy_hw_field fields[] = {
FIELD_ENTRY(formats, "%#llx"),
FIELD_ENTRY(rates, "%#x"),
FIELD_ENTRY(rate_min, "%d"),
FIELD_ENTRY(rate_max, "%d"),
FIELD_ENTRY(channels_min, "%d"),
FIELD_ENTRY(channels_max, "%d"),
FIELD_ENTRY(buffer_bytes_max, "%ld"),
FIELD_ENTRY(period_bytes_min, "%ld"),
FIELD_ENTRY(period_bytes_max, "%ld"),
FIELD_ENTRY(periods_min, "%d"),
FIELD_ENTRY(periods_max, "%d"),
};
static void dummy_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_dummy *dummy = entry->private_data;
int i;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
snd_iprintf(buffer, "%s ", fields[i].name);
if (fields[i].size == sizeof(int))
snd_iprintf(buffer, fields[i].format,
*get_dummy_int_ptr(dummy, fields[i].offset));
else
snd_iprintf(buffer, fields[i].format,
*get_dummy_ll_ptr(dummy, fields[i].offset));
if (!strcmp(fields[i].name, "formats"))
print_formats(dummy, buffer);
else if (!strcmp(fields[i].name, "rates"))
print_rates(dummy, buffer);
snd_iprintf(buffer, "\n");
}
}
static void dummy_proc_write(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_dummy *dummy = entry->private_data;
char line[64];
while (!snd_info_get_line(buffer, line, sizeof(line))) {
char item[20];
const char *ptr;
unsigned long long val;
int i;
ptr = snd_info_get_str(item, line, sizeof(item));
for (i = 0; i < ARRAY_SIZE(fields); i++) {
if (!strcmp(item, fields[i].name))
break;
}
if (i >= ARRAY_SIZE(fields))
continue;
snd_info_get_str(item, ptr, sizeof(item));
if (strict_strtoull(item, 0, &val))
continue;
if (fields[i].size == sizeof(int))
*get_dummy_int_ptr(dummy, fields[i].offset) = val;
else
*get_dummy_ll_ptr(dummy, fields[i].offset) = val;
}
}
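/*
 * Usage note for the writable proc file served by dummy_proc_write():
 * each line is a "<field> <value>" pair, where <field> is one of the
 * names in fields[] (e.g. "rate_max") and <value> is parsed with
 * strict_strtoull() using base 0, so plain decimal and 0x-prefixed hex
 * both work. The file typically appears as dummy_pcm under the card's
 * proc directory; the exact path depends on the card number.
 */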
static void __devinit dummy_proc_init(struct snd_dummy *chip)
{
struct snd_info_entry *entry;
if (!snd_card_proc_new(chip->card, "dummy_pcm", &entry)) {
snd_info_set_text_ops(entry, chip, dummy_proc_read);
entry->c.text.write = dummy_proc_write;
entry->mode |= S_IWUSR;
entry->private_data = chip;
}
}
#else
#define dummy_proc_init(x)
#endif /* CONFIG_SND_DEBUG && CONFIG_PROC_FS */
static int __devinit snd_dummy_probe(struct platform_device *devptr)
{
struct snd_card *card;
struct snd_dummy *dummy;
struct dummy_model *m = NULL, **mdl;
int idx, err;
int dev = devptr->id;
err = snd_card_create(index[dev], id[dev], THIS_MODULE,
sizeof(struct snd_dummy), &card);
if (err < 0)
return err;
dummy = card->private_data;
dummy->card = card;
for (mdl = dummy_models; *mdl && model[dev]; mdl++) {
if (strcmp(model[dev], (*mdl)->name) == 0) {
printk(KERN_INFO
"snd-dummy: Using model '%s' for card %i\n",
(*mdl)->name, card->number);
m = dummy->model = *mdl;
break;
}
}
for (idx = 0; idx < MAX_PCM_DEVICES && idx < pcm_devs[dev]; idx++) {
if (pcm_substreams[dev] < 1)
pcm_substreams[dev] = 1;
if (pcm_substreams[dev] > MAX_PCM_SUBSTREAMS)
pcm_substreams[dev] = MAX_PCM_SUBSTREAMS;
err = snd_card_dummy_pcm(dummy, idx, pcm_substreams[dev]);
if (err < 0)
goto __nodev;
}
dummy->pcm_hw = dummy_pcm_hardware;
if (m) {
if (m->formats)
dummy->pcm_hw.formats = m->formats;
if (m->buffer_bytes_max)
dummy->pcm_hw.buffer_bytes_max = m->buffer_bytes_max;
if (m->period_bytes_min)
dummy->pcm_hw.period_bytes_min = m->period_bytes_min;
if (m->period_bytes_max)
dummy->pcm_hw.period_bytes_max = m->period_bytes_max;
if (m->periods_min)
dummy->pcm_hw.periods_min = m->periods_min;
if (m->periods_max)
dummy->pcm_hw.periods_max = m->periods_max;
if (m->rates)
dummy->pcm_hw.rates = m->rates;
if (m->rate_min)
dummy->pcm_hw.rate_min = m->rate_min;
if (m->rate_max)
dummy->pcm_hw.rate_max = m->rate_max;
if (m->channels_min)
dummy->pcm_hw.channels_min = m->channels_min;
if (m->channels_max)
dummy->pcm_hw.channels_max = m->channels_max;
}
err = snd_card_dummy_new_mixer(dummy);
if (err < 0)
goto __nodev;
strcpy(card->driver, "Dummy");
strcpy(card->shortname, "Dummy");
sprintf(card->longname, "Dummy %i", dev + 1);
dummy_proc_init(dummy);
snd_card_set_dev(card, &devptr->dev);
err = snd_card_register(card);
if (err == 0) {
platform_set_drvdata(devptr, card);
return 0;
}
__nodev:
snd_card_free(card);
return err;
}
static int __devexit snd_dummy_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
return 0;
}
#ifdef CONFIG_PM
static int snd_dummy_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_card *card = platform_get_drvdata(pdev);
struct snd_dummy *dummy = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(dummy->pcm);
return 0;
}
static int snd_dummy_resume(struct platform_device *pdev)
{
struct snd_card *card = platform_get_drvdata(pdev);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
#endif
#define SND_DUMMY_DRIVER "snd_dummy"
static struct platform_driver snd_dummy_driver = {
.probe = snd_dummy_probe,
.remove = __devexit_p(snd_dummy_remove),
#ifdef CONFIG_PM
.suspend = snd_dummy_suspend,
.resume = snd_dummy_resume,
#endif
.driver = {
.name = SND_DUMMY_DRIVER
},
};
static void snd_dummy_unregister_all(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(devices); ++i)
platform_device_unregister(devices[i]);
platform_driver_unregister(&snd_dummy_driver);
free_fake_buffer();
}
static int __init alsa_card_dummy_init(void)
{
int i, cards, err;
err = platform_driver_register(&snd_dummy_driver);
if (err < 0)
return err;
err = alloc_fake_buffer();
if (err < 0) {
platform_driver_unregister(&snd_dummy_driver);
return err;
}
cards = 0;
for (i = 0; i < SNDRV_CARDS; i++) {
struct platform_device *device;
if (! enable[i])
continue;
device = platform_device_register_simple(SND_DUMMY_DRIVER,
i, NULL, 0);
if (IS_ERR(device))
continue;
if (!platform_get_drvdata(device)) {
platform_device_unregister(device);
continue;
}
devices[i] = device;
cards++;
}
if (!cards) {
#ifdef MODULE
printk(KERN_ERR "Dummy soundcard not found or device busy\n");
#endif
snd_dummy_unregister_all();
return -ENODEV;
}
return 0;
}
static void __exit alsa_card_dummy_exit(void)
{
snd_dummy_unregister_all();
}
module_init(alsa_card_dummy_init)
module_exit(alsa_card_dummy_exit)
| gpl-2.0 |
flintman/android_kernel_htc_msm8660-3.4 | fs/nfs/pnfs_dev.c | 4939 | 7472 | /*
* Device operations for the pnfs client.
*
* Copyright (c) 2002
* The Regents of the University of Michigan
* All Rights Reserved
*
* Dean Hildebrand <dhildebz@umich.edu>
* Garth Goodson <Garth.Goodson@netapp.com>
*
* Permission is granted to use, copy, create derivative works, and
* redistribute this software and such derivative works for any purpose,
* so long as the name of the University of Michigan is not used in
* any advertising or publicity pertaining to the use or distribution
* of this software without specific, written prior authorization. If
* the above copyright notice or any other identification of the
* University of Michigan is included in any copy of any portion of
* this software, then the disclaimer below must also be included.
*
* This software is provided as is, without representation or warranty
* of any kind either express or implied, including without limitation
* the implied warranties of merchantability, fitness for a particular
* purpose, or noninfringement. The Regents of the University of
* Michigan shall not be liable for any damages, including special,
* indirect, incidental, or consequential damages, with respect to any
* claim arising out of or in connection with the use of the software,
* even if it has been or is hereafter advised of the possibility of
* such damages.
*/
#include <linux/export.h>
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
/*
* Device ID RCU cache. A device ID is unique per server and layout type.
*/
#define NFS4_DEVICE_ID_HASH_BITS 5
#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
u32 *p = (u32 *)id;
dprintk("%s: device id= [%x%x%x%x]\n", __func__,
p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif
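/*
 * The hash below is a simple multiply-by-37 rolling hash over the
 * NFS4_DEVICEID4_SIZE raw bytes of the device ID, masked down to the
 * NFS4_DEVICE_ID_HASH_SIZE buckets of nfs4_deviceid_cache.
 */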
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
unsigned char *cptr = (unsigned char *)id->data;
unsigned int nbytes = NFS4_DEVICEID4_SIZE;
u32 x = 0;
while (nbytes--) {
x *= 37;
x += *cptr++;
}
return x & NFS4_DEVICE_ID_HASH_MASK;
}
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
const struct nfs_client *clp, const struct nfs4_deviceid *id,
long hash)
{
struct nfs4_deviceid_node *d;
struct hlist_node *n;
hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
if (d->ld == ld && d->nfs_client == clp &&
!memcmp(&d->deviceid, id, sizeof(*id))) {
if (atomic_read(&d->ref))
return d;
else
continue;
}
return NULL;
}
/*
* Lookup a deviceid in cache and get a reference count on it if found
*
* @clp nfs_client associated with deviceid
* @id deviceid to look up
*/
static struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
const struct nfs_client *clp, const struct nfs4_deviceid *id,
long hash)
{
struct nfs4_deviceid_node *d;
rcu_read_lock();
d = _lookup_deviceid(ld, clp, id, hash);
if (d != NULL)
atomic_inc(&d->ref);
rcu_read_unlock();
return d;
}
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
/*
 * Remove a deviceid from cache and, once all references to it have been
 * dropped, free it via the layout driver's free_deviceid_node()
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
struct nfs4_deviceid_node *d;
spin_lock(&nfs4_deviceid_lock);
rcu_read_lock();
d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
rcu_read_unlock();
if (!d) {
spin_unlock(&nfs4_deviceid_lock);
return;
}
hlist_del_init_rcu(&d->node);
spin_unlock(&nfs4_deviceid_lock);
synchronize_rcu();
/* balance the initial ref set up in nfs4_init_deviceid_node */
if (atomic_dec_and_test(&d->ref))
d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
const struct pnfs_layoutdriver_type *ld,
const struct nfs_client *nfs_client,
const struct nfs4_deviceid *id)
{
INIT_HLIST_NODE(&d->node);
INIT_HLIST_NODE(&d->tmpnode);
d->ld = ld;
d->nfs_client = nfs_client;
d->flags = 0;
d->deviceid = *id;
atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);
/*
* Uniquely initialize and insert a deviceid node into cache
*
* @new new deviceid node
* Note that the caller must set up the following members:
* new->ld
* new->nfs_client
* new->deviceid
*
* @ret the inserted node, if none found, otherwise, the found entry.
*/
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
struct nfs4_deviceid_node *d;
long hash;
spin_lock(&nfs4_deviceid_lock);
hash = nfs4_deviceid_hash(&new->deviceid);
d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
if (d) {
spin_unlock(&nfs4_deviceid_lock);
return d;
}
hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
spin_unlock(&nfs4_deviceid_lock);
atomic_inc(&new->ref);
return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
/*
* Dereference a deviceid node and delete it when its reference count drops
* to zero.
*
* @d deviceid node to put
*
* return true iff the node was deleted
* Note that the test for d->ref == 0 is sufficient to establish
* that the node is no longer hashed in the global device id cache.
*/
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
if (!atomic_dec_and_test(&d->ref))
return false;
d->ld->free_deviceid_node(d);
return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
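/*
 * Illustrative sketch of how a pNFS layout driver typically drives the
 * init/find/insert/put API above; it is not part of the original file and
 * is compiled out. The wrapper structure, the helper name and the GFP
 * flags are assumptions; only the nfs4_*deviceid* calls are real.
 */
#if 0
struct example_deviceid_node {
	struct nfs4_deviceid_node node;	/* embedded so container_of() works */
	/* driver-private decoding of the device address would live here */
};

static struct nfs4_deviceid_node *
example_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp,
		     const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;
	struct example_deviceid_node *new;

	/* fast path: the device ID is already cached and referenced */
	d = nfs4_find_get_deviceid(ld, clp, id);
	if (d)
		return d;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	nfs4_init_deviceid_node(&new->node, ld, clp, id);

	/* insert; if another thread raced us, its node is returned instead */
	d = nfs4_insert_deviceid_node(&new->node);
	if (d != &new->node)
		kfree(new);

	/* the caller drops its reference with nfs4_put_deviceid_node(d) */
	return d;
}
#endif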
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
struct nfs4_deviceid_node *d;
struct hlist_node *n;
HLIST_HEAD(tmp);
spin_lock(&nfs4_deviceid_lock);
rcu_read_lock();
hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
if (d->nfs_client == clp && atomic_read(&d->ref)) {
hlist_del_init_rcu(&d->node);
hlist_add_head(&d->tmpnode, &tmp);
}
rcu_read_unlock();
spin_unlock(&nfs4_deviceid_lock);
if (hlist_empty(&tmp))
return;
synchronize_rcu();
while (!hlist_empty(&tmp)) {
d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
hlist_del(&d->tmpnode);
if (atomic_dec_and_test(&d->ref))
d->ld->free_deviceid_node(d);
}
}
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
long h;
if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
return;
for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
_deviceid_purge_client(clp, h);
}
/*
* Stop use of all deviceids associated with an nfs_client
*/
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
struct nfs4_deviceid_node *d;
struct hlist_node *n;
int i;
rcu_read_lock();
for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
if (d->nfs_client == clp)
set_bit(NFS_DEVICEID_INVALID, &d->flags);
}
rcu_read_unlock();
}
| gpl-2.0 |
diego-ch/android_kernel_samsung_u8500 | arch/parisc/kernel/signal.c | 7243 | 19386 | /*
* linux/arch/parisc/kernel/signal.c: Architecture-specific signal
* handling support.
*
* Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
* Copyright (C) 2000 Linuxcare, Inc.
*
* Based on the ia64, i386, and alpha versions.
*
* Like the IA-64, we are a recent enough port (we are *starting*
* with glibc2.2) that we do not need to support the old non-realtime
* Linux signals. Therefore we don't. HP/UX signals will go in
* arch/parisc/hpux/signal.c when we figure out how to do them.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_COMPAT
#include "signal32.h"
#endif
#define DEBUG_SIG 0
#define DEBUG_SIG_LEVEL 2
#if DEBUG_SIG
#define DBG(LEVEL, ...) \
((DEBUG_SIG_LEVEL >= LEVEL) \
? printk(__VA_ARGS__) : (void) 0)
#else
#define DBG(LEVEL, ...)
#endif
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/* gcc will complain if a pointer is cast to an integer of different
* size. If you really need to do this (and we do for an ELF32 user
* application in an ELF64 kernel) then you have to do a cast to an
* integer of the same size first. The A() macro accomplishes
* this. */
#define A(__x) ((unsigned long)(__x))
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_64BIT
#include "sys32.h"
#endif
/*
* Do a signal return - restore sigcontext.
*/
/* Trampoline for calling rt_sigreturn() */
#define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */
#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */
#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */
#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
#define INSN_NOP 0x08000240 /* nop */
/* For debugging */
#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
long err = 0;
err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr));
err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
err |= __get_user(regs->sar, &sc->sc_sar);
DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n",
regs->iaoq[0],regs->iaoq[1]);
DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
return err;
}
void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
sigset_t set;
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
compat_sigset_t compat_set;
struct compat_rt_sigframe __user * compat_frame;
if (is_compat_task())
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
/* Unwind the user stack to get the rt_sigframe structure. */
frame = (struct rt_sigframe __user *)
(usp - sigframe_size);
DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
if (is_compat_task()) {
DBG(2,"sys_rt_sigreturn: ELF32 process.\n");
if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set)))
goto give_sigsegv;
sigset_32to64(&set,&compat_set);
} else
#endif
{
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto give_sigsegv;
}
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
/* Good thing we saved the old gr[30], eh? */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n",
&compat_frame->uc.uc_mcontext);
// FIXME: Load upper half from register file
if (restore_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs))
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &compat_frame->uc.uc_stack);
if (do_sigaltstack32(&compat_frame->uc.uc_stack, NULL, usp) == -EFAULT)
goto give_sigsegv;
} else
#endif
{
DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n",
&frame->uc.uc_mcontext);
if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &frame->uc.uc_stack);
if (do_sigaltstack(&frame->uc.uc_stack, NULL, usp) == -EFAULT)
goto give_sigsegv;
}
/* If we are on the syscall path IAOQ will not be restored, and
* if we are on the interrupt path we must not corrupt gr31.
*/
if (in_syscall)
regs->gr[31] = regs->iaoq[0];
#if DEBUG_SIG
DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]);
show_regs(regs);
#endif
return;
give_sigsegv:
DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
force_sig(SIGSEGV, current);
return;
}
/*
* Set up a signal frame.
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
/*FIXME: ELF32 vs. ELF64 has different frame_size, but since we
don't use the parameter it doesn't matter */
DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
(unsigned long)ka, sp, frame_size);
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp; /* Stacks grow up! */
DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
return (void __user *) sp; /* Stacks grow up. Fun. */
}
static long
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
{
unsigned long flags = 0;
long err = 0;
if (on_sig_stack((unsigned long) sc))
flags |= PARISC_SC_FLAG_ONSTACK;
if (in_syscall) {
flags |= PARISC_SC_FLAG_IN_SYSCALL;
/* regs->iaoq is undefined in the syscall return path */
err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]);
err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[0]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[1]);
DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n",
regs->gr[31], regs->gr[31]+4);
} else {
err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq));
err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq));
DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n",
regs->iaoq[0], regs->iaoq[1]);
}
err |= __put_user(flags, &sc->sc_flags);
err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr));
err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
err |= __put_user(regs->sar, &sc->sc_sar);
DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]);
return err;
}
static long
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
unsigned long rp, usp;
unsigned long haddr, sigframe_size;
int err = 0;
#ifdef CONFIG_64BIT
compat_int_t compat_val;
struct compat_rt_sigframe __user * compat_frame;
compat_sigset_t compat_set;
#endif
usp = (regs->gr[30] & ~(0x01UL));
/*FIXME: frame_size parameter is unused, remove it. */
frame = get_sigframe(ka, usp, sizeof(*frame));
DBG(1,"SETUP_RT_FRAME: START\n");
DBG(1,"setup_rt_frame: frame %p info %p\n", frame, info);
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
if (is_compat_task()) {
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
err |= copy_siginfo_to_user32(&compat_frame->info, info);
DBG(1,"SETUP_RT_FRAME: 1\n");
compat_val = (compat_int_t)current->sas_ss_sp;
err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_sp);
DBG(1,"SETUP_RT_FRAME: 2\n");
compat_val = (compat_int_t)current->sas_ss_size;
err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_size);
DBG(1,"SETUP_RT_FRAME: 3\n");
compat_val = sas_ss_flags(regs->gr[30]);
err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_flags);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs, in_syscall);
sigset_64to32(&compat_set,set);
err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set));
} else
#endif
{
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info);
err |= copy_siginfo_to_user(&frame->info, info);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= __put_user(sas_ss_flags(regs->gr[30]),
&frame->uc.uc_stack.ss_flags);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
/* FIXME: Should probably be converted as well for the compat case */
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
}
if (err)
goto give_sigsegv;
/* Set up to return from userspace. If provided, use a stub
already in userspace. The first words of tramp are used to
save the previous sigrestartblock trampoline that might be
on the stack. We start the sigreturn trampoline at
SIGRESTARTBLOCK_TRAMP+X. */
err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
err |= __put_user(INSN_LDI_R20,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
err |= __put_user(INSN_BLE_SR2_R0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
#if DEBUG_SIG
/* Assert that we're flushing in the correct space... */
{
int sid;
asm ("mfsp %%sr3,%0" : "=r" (sid));
DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
sid, frame->tramp);
}
#endif
flush_user_dcache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[TRAMP_SIZE]);
flush_user_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[TRAMP_SIZE]);
/* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
* TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
* So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
*/
rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
if (err)
goto give_sigsegv;
haddr = A(ka->sa.sa_handler);
/* The sa_handler may be a pointer to a function descriptor */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
#endif
if (haddr & PA_PLABEL_FDESC) {
Elf32_Fdesc fdesc;
Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
goto give_sigsegv;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
}
#ifdef CONFIG_64BIT
} else {
Elf64_Fdesc fdesc;
Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
goto give_sigsegv;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n",
haddr, regs->gr[19], in_syscall);
}
#endif
/* The syscall return path will create IAOQ values from r31.
*/
sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
if (is_compat_task())
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
if (in_syscall) {
regs->gr[31] = haddr;
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
sigframe_size |= 1;
#endif
} else {
unsigned long psw = USER_PSW;
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
psw |= PSW_W;
#endif
/* If we are singlestepping, arrange a trap to be delivered
when we return to userspace. Note the semantics -- we
should trap before the first insn in the handler is
executed. Ref:
http://sources.redhat.com/ml/gdb/2004-11/msg00245.html
*/
if (pa_psw(current)->r) {
pa_psw(current)->r = 0;
psw |= PSW_R;
mtctl(-1, 0);
}
regs->gr[0] = psw;
regs->iaoq[0] = haddr | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
}
regs->gr[2] = rp; /* userland return pointer */
regs->gr[26] = sig; /* signal number */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */
} else
#endif
{
regs->gr[25] = A(&frame->info); /* siginfo pointer */
regs->gr[24] = A(&frame->uc); /* ucontext pointer */
}
DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n",
regs->gr[30], sigframe_size,
regs->gr[30] + sigframe_size);
/* Raise the user stack pointer to make a proper call frame. */
regs->gr[30] = (A(frame) + sigframe_size);
DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n",
current->comm, current->pid, frame, regs->gr[30],
regs->iaoq[0], regs->iaoq[1], rp);
return 1;
give_sigsegv:
DBG(1,"setup_rt_frame: sending SIGSEGV\n");
force_sigsegv(sig, current);
return 0;
}
/*
* OK, we're invoking a handler.
*/
static long
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs *regs, int in_syscall)
{
DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
sig, ka, info, oldset, regs);
/* Set up the stack frame */
if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
return 0;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
tracehook_signal_handler(sig, info, ka, regs,
test_thread_flag(TIF_SINGLESTEP) ||
test_thread_flag(TIF_BLOCKSTEP));
return 1;
}
static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
/* Check the return code */
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK:
current_thread_info()->restart_block.fn =
do_no_restart_syscall;
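/* fallthrough */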
case -ERESTARTNOHAND:
DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
regs->gr[28] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
DBG(1,"ERESTARTSYS: putting -EINTR\n");
regs->gr[28] = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/* A syscall is just a branch, so all
* we have to do is fiddle the return pointer.
*/
regs->gr[31] -= 8; /* delayed branching */
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
break;
}
}
static inline void
insert_restart_trampoline(struct pt_regs *regs)
{
switch(regs->gr[28]) {
case -ERESTART_RESTARTBLOCK: {
/* Restart the system call - no handlers present */
unsigned int *usp = (unsigned int *)regs->gr[30];
/* Setup a trampoline to restart the syscall
* with __NR_restart_syscall
*
* 0: <return address (orig r31)>
* 4: <2nd half for 64-bit>
* 8: ldw 0(%sp), %r31
* 12: be 0x100(%sr2, %r0)
* 16: ldi __NR_restart_syscall, %r20
*/
#ifdef CONFIG_64BIT
put_user(regs->gr[31] >> 32, &usp[0]);
put_user(regs->gr[31] & 0xffffffff, &usp[1]);
put_user(0x0fc010df, &usp[2]);
#else
put_user(regs->gr[31], &usp[0]);
put_user(0x0fc0109f, &usp[2]);
#endif
put_user(0xe0008200, &usp[3]);
put_user(0x34140000, &usp[4]);
/* Stack is 64-byte aligned, and we only need
* to flush 1 cache line.
* Flushing one cacheline is cheap.
* "sync" on bigger (> 4 way) boxes is not.
*/
flush_user_dcache_range(regs->gr[30], regs->gr[30] + 4);
flush_user_icache_range(regs->gr[30], regs->gr[30] + 4);
regs->gr[31] = regs->gr[30] + 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
return;
}
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR: {
/* Hooray for delayed branching. We don't
* have to restore %r20 (the system call
* number) because it gets loaded in the delay
* slot of the branch external instruction.
*/
regs->gr[31] -= 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
return;
}
default:
break;
}
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init with SIGKILL, not even by
* mistake.
*
* We need to be able to restore the syscall arguments (r21-r26) to
* restart syscalls. Thus, the syscall path should save them in the
* pt_regs structure (it's okay to do so since they are caller-save
* registers). As noted below, the syscall number gets restored for
* us due to the magic of delayed branching.
*/
asmlinkage void
do_signal(struct pt_regs *regs, long in_syscall)
{
siginfo_t info;
struct k_sigaction ka;
int signr;
sigset_t *oldset;
DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
oldset, regs, regs->sr[7], in_syscall);
/* Everyone else checks to see if they are in kernel mode at
this point and exits if that's the case. I'm not sure why
we would be called in that case, but for some reason we
are. */
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
DBG(1,"do_signal: oldset %08lx / %08lx\n",
oldset->sig[0], oldset->sig[1]);
/* May need to force signal if handle_signal failed to deliver */
while (1) {
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
if (signr <= 0)
break;
/* Restart a system call if necessary. */
if (in_syscall)
syscall_restart(regs, &ka);
/* Whee! Actually deliver the signal. If the
delivery failed, we need to continue to iterate in
this loop so we can deliver the SIGSEGV... */
if (handle_signal(signr, &info, &ka, oldset,
regs, in_syscall)) {
DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
regs->gr[28]);
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
return;
}
}
/* end of while(1) looping forever if we can't force a signal */
/* Did we come from a system call? */
if (in_syscall)
insert_restart_trampoline(regs);
DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
regs->gr[28]);
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
return;
}
void do_notify_resume(struct pt_regs *regs, long in_syscall)
{
if (test_thread_flag(TIF_SIGPENDING) ||
test_thread_flag(TIF_RESTORE_SIGMASK))
do_signal(regs, in_syscall);
if (test_thread_flag(TIF_NOTIFY_RESUME)) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
}
| gpl-2.0 |
agrpallav/qemu-copypaste | ui/vnc-jobs.c | 76 | 9372 | /*
* QEMU VNC display driver
*
* Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws>
* Copyright (C) 2006 Fabrice Bellard
* Copyright (C) 2009 Red Hat, Inc
* Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "vnc.h"
#include "vnc-jobs.h"
#include "qemu/sockets.h"
/*
* Locking:
*
* There are three levels of locking:
* - jobs queue lock: for each operation on the queue (push, pop, isEmpty?)
* - VncDisplay global lock: mainly used for framebuffer updates to avoid
* screen corruption if the framebuffer is updated
* while the worker is doing something.
* - VncState::output lock: used to make sure the output buffer is not corrupted
* if two threads try to write on it at the same time
*
* While the VNC worker thread is working, the VncDisplay global lock is held
* to avoid screen corruption (this does not block vnc_refresh() because it
* uses trylock()) but the output lock is not held because the thread works on
* its own output buffer.
* When the encoding job is done, the worker thread will hold the output lock
* and copy its output buffer in vs->output.
*/
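/*
 * Editor's sketch (not part of QEMU): the display-lock/trylock pattern
 * described above, reduced to plain pthreads so it is self-contained.
 * The function names are hypothetical; QEMU itself uses QemuMutex and
 * qemu_mutex_trylock() rather than the pthread calls shown here.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t example_display_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker side: hold the lock for the whole encoding job so the
 * framebuffer cannot change underneath the encoder. */
static void example_worker_encode(void (*encode)(void))
{
    pthread_mutex_lock(&example_display_lock);
    encode();
    pthread_mutex_unlock(&example_display_lock);
}

/* Refresh side: never block; if the worker holds the lock, skip this
 * update and try again on the next refresh tick. */
static bool example_refresh(void (*update)(void))
{
    if (pthread_mutex_trylock(&example_display_lock) != 0)
        return false;
    update();
    pthread_mutex_unlock(&example_display_lock);
    return true;
}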
struct VncJobQueue {
QemuCond cond;
QemuMutex mutex;
QemuThread thread;
Buffer buffer;
bool exit;
QTAILQ_HEAD(, VncJob) jobs;
};
typedef struct VncJobQueue VncJobQueue;
/*
* We use a single global queue, but most of the functions are
* already reentrant, so we can easily add more than one encoding thread
*/
static VncJobQueue *queue;
static void vnc_lock_queue(VncJobQueue *queue)
{
qemu_mutex_lock(&queue->mutex);
}
static void vnc_unlock_queue(VncJobQueue *queue)
{
qemu_mutex_unlock(&queue->mutex);
}
VncJob *vnc_job_new(VncState *vs)
{
VncJob *job = g_malloc0(sizeof(VncJob));
job->vs = vs;
vnc_lock_queue(queue);
QLIST_INIT(&job->rectangles);
vnc_unlock_queue(queue);
return job;
}
int vnc_job_add_rect(VncJob *job, int x, int y, int w, int h)
{
VncRectEntry *entry = g_malloc0(sizeof(VncRectEntry));
entry->rect.x = x;
entry->rect.y = y;
entry->rect.w = w;
entry->rect.h = h;
vnc_lock_queue(queue);
QLIST_INSERT_HEAD(&job->rectangles, entry, next);
vnc_unlock_queue(queue);
return 1;
}
void vnc_job_push(VncJob *job)
{
vnc_lock_queue(queue);
if (queue->exit || QLIST_EMPTY(&job->rectangles)) {
g_free(job);
} else {
QTAILQ_INSERT_TAIL(&queue->jobs, job, next);
qemu_cond_broadcast(&queue->cond);
}
vnc_unlock_queue(queue);
}
static bool vnc_has_job_locked(VncState *vs)
{
VncJob *job;
QTAILQ_FOREACH(job, &queue->jobs, next) {
if (job->vs == vs || !vs) {
return true;
}
}
return false;
}
bool vnc_has_job(VncState *vs)
{
bool ret;
vnc_lock_queue(queue);
ret = vnc_has_job_locked(vs);
vnc_unlock_queue(queue);
return ret;
}
void vnc_jobs_clear(VncState *vs)
{
VncJob *job, *tmp;
vnc_lock_queue(queue);
QTAILQ_FOREACH_SAFE(job, &queue->jobs, next, tmp) {
if (job->vs == vs || !vs) {
QTAILQ_REMOVE(&queue->jobs, job, next);
}
}
vnc_unlock_queue(queue);
}
void vnc_jobs_join(VncState *vs)
{
vnc_lock_queue(queue);
while (vnc_has_job_locked(vs)) {
qemu_cond_wait(&queue->cond, &queue->mutex);
}
vnc_unlock_queue(queue);
vnc_jobs_consume_buffer(vs);
}
void vnc_jobs_consume_buffer(VncState *vs)
{
bool flush;
vnc_lock_output(vs);
if (vs->jobs_buffer.offset) {
vnc_write(vs, vs->jobs_buffer.buffer, vs->jobs_buffer.offset);
buffer_reset(&vs->jobs_buffer);
}
flush = vs->csock != -1 && vs->abort != true;
vnc_unlock_output(vs);
if (flush) {
vnc_flush(vs);
}
}
/*
* Copy data for local use
*/
static void vnc_async_encoding_start(VncState *orig, VncState *local)
{
local->vnc_encoding = orig->vnc_encoding;
local->features = orig->features;
local->vd = orig->vd;
local->lossy_rect = orig->lossy_rect;
local->write_pixels = orig->write_pixels;
local->client_pf = orig->client_pf;
local->client_be = orig->client_be;
local->tight = orig->tight;
local->zlib = orig->zlib;
local->hextile = orig->hextile;
local->zrle = orig->zrle;
local->output = queue->buffer;
local->csock = -1; /* Don't do any network work on this thread */
buffer_reset(&local->output);
}
static void vnc_async_encoding_end(VncState *orig, VncState *local)
{
orig->tight = local->tight;
orig->zlib = local->zlib;
orig->hextile = local->hextile;
orig->zrle = local->zrle;
orig->lossy_rect = local->lossy_rect;
queue->buffer = local->output;
}
static int vnc_worker_thread_loop(VncJobQueue *queue)
{
VncJob *job;
VncRectEntry *entry, *tmp;
VncState vs;
int n_rectangles;
int saved_offset;
vnc_lock_queue(queue);
while (QTAILQ_EMPTY(&queue->jobs) && !queue->exit) {
qemu_cond_wait(&queue->cond, &queue->mutex);
}
/* Here job can only be NULL if queue->exit is true */
job = QTAILQ_FIRST(&queue->jobs);
vnc_unlock_queue(queue);
if (queue->exit) {
return -1;
}
vnc_lock_output(job->vs);
if (job->vs->csock == -1 || job->vs->abort == true) {
vnc_unlock_output(job->vs);
goto disconnected;
}
vnc_unlock_output(job->vs);
/* Make a local copy of vs and switch output buffers */
vnc_async_encoding_start(job->vs, &vs);
/* Start sending rectangles */
n_rectangles = 0;
vnc_write_u8(&vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
vnc_write_u8(&vs, 0);
saved_offset = vs.output.offset;
vnc_write_u16(&vs, 0);
vnc_lock_display(job->vs->vd);
QLIST_FOREACH_SAFE(entry, &job->rectangles, next, tmp) {
int n;
if (job->vs->csock == -1) {
vnc_unlock_display(job->vs->vd);
goto disconnected;
}
n = vnc_send_framebuffer_update(&vs, entry->rect.x, entry->rect.y,
entry->rect.w, entry->rect.h);
if (n >= 0) {
n_rectangles += n;
}
g_free(entry);
}
vnc_unlock_display(job->vs->vd);
/* Put n_rectangles at the beginning of the message */
vs.output.buffer[saved_offset] = (n_rectangles >> 8) & 0xFF;
vs.output.buffer[saved_offset + 1] = n_rectangles & 0xFF;
vnc_lock_output(job->vs);
if (job->vs->csock != -1) {
buffer_reserve(&job->vs->jobs_buffer, vs.output.offset);
buffer_append(&job->vs->jobs_buffer, vs.output.buffer,
vs.output.offset);
/* Copy persistent encoding data */
vnc_async_encoding_end(job->vs, &vs);
qemu_bh_schedule(job->vs->bh);
}
vnc_unlock_output(job->vs);
disconnected:
vnc_lock_queue(queue);
QTAILQ_REMOVE(&queue->jobs, job, next);
vnc_unlock_queue(queue);
qemu_cond_broadcast(&queue->cond);
g_free(job);
return 0;
}
static VncJobQueue *vnc_queue_init(void)
{
VncJobQueue *queue = g_malloc0(sizeof(VncJobQueue));
qemu_cond_init(&queue->cond);
qemu_mutex_init(&queue->mutex);
QTAILQ_INIT(&queue->jobs);
return queue;
}
static void vnc_queue_clear(VncJobQueue *q)
{
qemu_cond_destroy(&queue->cond);
qemu_mutex_destroy(&queue->mutex);
buffer_free(&queue->buffer);
g_free(q);
queue = NULL; /* Unset global queue */
}
static void *vnc_worker_thread(void *arg)
{
VncJobQueue *queue = arg;
qemu_thread_get_self(&queue->thread);
while (!vnc_worker_thread_loop(queue)) ;
vnc_queue_clear(queue);
return NULL;
}
static bool vnc_worker_thread_running(void)
{
return queue; /* Check global queue */
}
void vnc_start_worker_thread(void)
{
VncJobQueue *q;
if (vnc_worker_thread_running())
return ;
q = vnc_queue_init();
qemu_thread_create(&q->thread, vnc_worker_thread, q, QEMU_THREAD_DETACHED);
queue = q; /* Set global queue */
}
void vnc_stop_worker_thread(void)
{
if (!vnc_worker_thread_running())
return ;
/* Remove all jobs and wake up the thread */
vnc_lock_queue(queue);
queue->exit = true;
vnc_unlock_queue(queue);
vnc_jobs_clear(NULL);
qemu_cond_broadcast(&queue->cond);
}
| gpl-2.0 |
bruce2728/kernel_lge_d802 | drivers/acpi/processor_idle.c | 76 | 33834 | /*
* processor_idle - idle state submodule to the ACPI processor driver
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
* Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* - Added processor hotplug support
* Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added support for C3 on SMP
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/pm_qos.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
* asm/acpi.h is not an option, as it would require more include magic. Also
* creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
*/
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif
#include <asm/io.h>
#include <asm/uaccess.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD 1 /* 1us */
#define C3_OVERHEAD 1 /* 1us */
#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
static int disabled_by_idle_boot_param(void)
{
return boot_option_idle_override == IDLE_POLL ||
boot_option_idle_override == IDLE_FORCE_MWAIT ||
boot_option_idle_override == IDLE_HALT;
}
/*
* IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
* For now disable this. Probably a bug somewhere else.
*
* To skip this limit, boot/load with a large max_cstate limit.
*/
static int set_max_cstate(const struct dmi_system_id *id)
{
if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
return 0;
printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
" Override with \"processor.max_cstate=%d\"\n", id->ident,
(long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
max_cstate = (long)id->driver_data;
return 0;
}
/* Actually this shouldn't be, would be better to fix the
callers to only run once -AK */
static struct dmi_system_id processor_power_dmi_table[] = {
{ set_max_cstate, "Clevo 5600D", {
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
{ set_max_cstate, "Pavilion zv5000", {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
(void *)1},
{ set_max_cstate, "Asus L8400B", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
{},
};
/*
* Callers should disable interrupts before the call and enable
* interrupts after return.
*/
static void acpi_safe_halt(void)
{
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we
* test NEED_RESCHED:
*/
smp_mb();
if (!need_resched()) {
safe_halt();
local_irq_disable();
}
current_thread_info()->status |= TS_POLLING;
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3
/*
* Some BIOS implementations switch to C3 in the published C2 state.
* This seems to be a common problem on AMD boxen, but other vendors
* are affected too. We pick the most conservative approach: we assume
* that the local APIC stops in both C2 and C3.
*/
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
struct acpi_processor_power *pwr = &pr->power;
u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
if (amd_e400_c1e_detected)
type = ACPI_STATE_C1;
/*
* Check if one of the previous states already marked the lapic
* unstable
*/
if (pwr->timer_broadcast_on_state < state)
return;
if (cx->type >= type)
pr->power.timer_broadcast_on_state = state;
}
static void __lapic_timer_propagate_broadcast(void *arg)
{
struct acpi_processor *pr = (struct acpi_processor *) arg;
unsigned long reason;
reason = pr->power.timer_broadcast_on_state < INT_MAX ?
CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
clockevents_notify(reason, &pr->id);
}
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
(void *)pr, 1);
}
/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
int state = cx - pr->power.states;
if (state >= pr->power.timer_broadcast_on_state) {
unsigned long reason;
reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
clockevents_notify(reason, &pr->id);
}
}
#else
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
}
#endif
/*
* Suspend / resume control
*/
static u32 saved_bm_rld;
static void acpi_idle_bm_rld_save(void)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
}
static void acpi_idle_bm_rld_restore(void)
{
u32 resumed_bm_rld;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
if (resumed_bm_rld != saved_bm_rld)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
acpi_idle_bm_rld_save();
return 0;
}
int acpi_processor_resume(struct acpi_device * device)
{
acpi_idle_bm_rld_restore();
return 0;
}
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
*/
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
/*FALL THROUGH*/
default:
/* TSC could halt in idle, so notify users */
if (state > ACPI_STATE_C1)
mark_tsc_unstable("TSC halts in idle");
}
}
#else
static void tsc_check_state(int state) { return; }
#endif
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
if (!pr)
return -EINVAL;
if (!pr->pblk)
return -ENODEV;
/* if info is obtained from pblk/fadt, type equals state */
pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
#ifndef CONFIG_HOTPLUG_CPU
/*
* Check for P_LVL2_UP flag before entering C2 and above on
* an SMP system.
*/
if ((num_online_cpus() > 1) &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
return -ENODEV;
#endif
/* determine C2 and C3 address from pblk */
pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
/* determine latencies from FADT */
pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
/*
* FADT specified C2 latency must be less than or equal to
* 100 microseconds.
*/
if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
/* invalidate C2 */
pr->power.states[ACPI_STATE_C2].address = 0;
}
/*
* FADT supplied C3 latency must be less than or equal to
* 1000 microseconds.
*/
if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
/* invalidate C3 */
pr->power.states[ACPI_STATE_C3].address = 0;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
pr->power.states[ACPI_STATE_C2].address,
pr->power.states[ACPI_STATE_C3].address));
return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
if (!pr->power.states[ACPI_STATE_C1].valid) {
/* set the first C-State to C1 */
/* all processors need to support C1 */
pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
pr->power.states[ACPI_STATE_C1].valid = 1;
pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
}
/* the C0 state only exists as a filler in our array */
pr->power.states[ACPI_STATE_C0].valid = 1;
return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
acpi_status status = 0;
u64 count;
int current_count;
int i;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *cst;
if (nocst)
return -ENODEV;
current_count = 0;
status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
return -ENODEV;
}
cst = buffer.pointer;
/* There must be at least 2 elements */
if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
printk(KERN_ERR PREFIX "not enough elements in _CST\n");
status = -EFAULT;
goto end;
}
count = cst->package.elements[0].integer.value;
/* Validate number of power states. */
if (count < 1 || count != cst->package.count - 1) {
printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
status = -EFAULT;
goto end;
}
/* Tell driver that at least _CST is supported. */
pr->flags.has_cst = 1;
for (i = 1; i <= count; i++) {
union acpi_object *element;
union acpi_object *obj;
struct acpi_power_register *reg;
struct acpi_processor_cx cx;
memset(&cx, 0, sizeof(cx));
element = &(cst->package.elements[i]);
if (element->type != ACPI_TYPE_PACKAGE)
continue;
if (element->package.count != 4)
continue;
obj = &(element->package.elements[0]);
if (obj->type != ACPI_TYPE_BUFFER)
continue;
reg = (struct acpi_power_register *)obj->buffer.pointer;
if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
(reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
continue;
/* There should be an easy way to extract an integer... */
obj = &(element->package.elements[1]);
if (obj->type != ACPI_TYPE_INTEGER)
continue;
cx.type = obj->integer.value;
/*
* Some buggy BIOSes won't list C1 in _CST -
* Let acpi_processor_get_power_info_default() handle them later
*/
if (i == 1 && cx.type != ACPI_STATE_C1)
current_count++;
cx.address = reg->address;
cx.index = current_count + 1;
cx.entry_method = ACPI_CSTATE_SYSTEMIO;
if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
if (acpi_processor_ffh_cstate_probe
(pr->id, &cx, reg) == 0) {
cx.entry_method = ACPI_CSTATE_FFH;
} else if (cx.type == ACPI_STATE_C1) {
/*
* C1 is a special case where FIXED_HARDWARE
* can be handled in non-MWAIT way as well.
* In that case, save this _CST entry info.
* Otherwise, ignore this info and continue.
*/
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
} else {
continue;
}
if (cx.type == ACPI_STATE_C1 &&
(boot_option_idle_override == IDLE_NOMWAIT)) {
/*
* In most cases the C1 space_id obtained from
* _CST object is FIXED_HARDWARE access mode.
* But when the option of idle=halt is added,
* the entry_method type should be changed from
* CSTATE_FFH to CSTATE_HALT.
* When the option of idle=nomwait is added,
* the C1 entry_method type should be
* CSTATE_HALT.
*/
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
}
} else {
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
cx.address);
}
if (cx.type == ACPI_STATE_C1) {
cx.valid = 1;
}
obj = &(element->package.elements[2]);
if (obj->type != ACPI_TYPE_INTEGER)
continue;
cx.latency = obj->integer.value;
obj = &(element->package.elements[3]);
if (obj->type != ACPI_TYPE_INTEGER)
continue;
cx.power = obj->integer.value;
current_count++;
memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
/*
* We support total ACPI_PROCESSOR_MAX_POWER - 1
* (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
*/
if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
printk(KERN_WARNING
"Limiting number of power states to max (%d)\n",
ACPI_PROCESSOR_MAX_POWER);
printk(KERN_WARNING
"Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
break;
}
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
current_count));
/* Validate number of power states discovered */
if (current_count < 2)
status = -EFAULT;
end:
kfree(buffer.pointer);
return status;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
static int bm_check_flag = -1;
static int bm_control_flag = -1;
if (!cx->address)
return;
/*
* PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
* DMA transfers are used by any ISA device to avoid livelock.
* Note that we could disable Type-F DMA (as recommended by
* the erratum), but this is known to disrupt certain ISA
* devices thus we take the conservative approach.
*/
else if (errata.piix4.fdma) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 not supported on PIIX4 with Type-F DMA\n"));
return;
}
/* All the logic here assumes flags.bm_check is same across all CPUs */
if (bm_check_flag == -1) {
/* Determine whether bm_check is needed based on CPU */
acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
bm_check_flag = pr->flags.bm_check;
bm_control_flag = pr->flags.bm_control;
} else {
pr->flags.bm_check = bm_check_flag;
pr->flags.bm_control = bm_control_flag;
}
if (pr->flags.bm_check) {
if (!pr->flags.bm_control) {
if (pr->flags.has_cst != 1) {
/* bus mastering control is necessary */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 support requires BM control\n"));
return;
} else {
/* Here we enter C3 without bus mastering */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 support without BM control\n"));
}
}
} else {
/*
* WBINVD should be set in the FADT for the C3 state to be
* supported when bm_check is not required.
*/
if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Cache invalidation should work properly"
" for C3 to be enabled on SMP systems\n"));
return;
}
}
/*
* Otherwise we've met all of our C3 requirements.
* Normalize the C3 latency to expedite policy. Enable
* checking of bus mastering status (bm_check) so we can
* use this in our C3 policy
*/
cx->valid = 1;
cx->latency_ticks = cx->latency;
/*
* On older chipsets, BM_RLD needs to be set
* in order for Bus Master activity to wake the
* system from C3. Newer chipsets handle DMA
* during C3 automatically and BM_RLD is a NOP.
* In either case, the proper way to
* handle BM_RLD is to set it and leave it set.
*/
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
unsigned int i;
unsigned int working = 0;
pr->power.timer_broadcast_on_state = INT_MAX;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
struct acpi_processor_cx *cx = &pr->power.states[i];
switch (cx->type) {
case ACPI_STATE_C1:
cx->valid = 1;
break;
case ACPI_STATE_C2:
if (!cx->address)
break;
cx->valid = 1;
cx->latency_ticks = cx->latency; /* Normalize latency */
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
break;
}
if (!cx->valid)
continue;
lapic_timer_check_state(i, pr, cx);
tsc_check_state(cx->type);
working++;
}
lapic_timer_propagate_broadcast(pr);
return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
unsigned int i;
int result;
/* NOTE: the idle thread may not be running while calling
* this function */
/* Zero initialize all the C-states info. */
memset(pr->power.states, 0, sizeof(pr->power.states));
result = acpi_processor_get_power_info_cst(pr);
if (result == -ENODEV)
result = acpi_processor_get_power_info_fadt(pr);
if (result)
return result;
acpi_processor_get_power_info_default(pr);
pr->power.count = acpi_processor_power_verify(pr);
/*
* if one state of type C2 or C3 is available, mark this
* CPU as being "idle manageable"
*/
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
if (pr->power.states[i].valid) {
pr->power.count = i;
if (pr->power.states[i].type >= ACPI_STATE_C2)
pr->flags.power = 1;
}
}
return 0;
}
/**
* acpi_idle_bm_check - checks if bus master activity was detected
*/
static int acpi_idle_bm_check(void)
{
u32 bm_status = 0;
if (bm_check_disable)
return 0;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
if (bm_status)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
/*
* PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
* the true state of bus mastering activity; forcing us to
* manually check the BMIDEA bit of each IDE channel.
*/
else if (errata.piix4.bmisx) {
if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
|| (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
bm_status = 1;
}
return bm_status;
}
/**
* acpi_idle_do_entry - a helper function that does C2 and C3 type entry
* @cx: cstate data
*
* Caller disables interrupt before call and enables interrupt after return.
*/
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
/* Don't trace irqs off for idle */
stop_critical_timings();
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
} else if (cx->entry_method == ACPI_CSTATE_HALT) {
acpi_safe_halt();
} else {
/* IO port based C-state */
inb(cx->address);
/* Dummy wait op - must do something useless after P_LVL2 read
because chipsets cannot guarantee that STPCLK# signal
gets asserted in time to freeze execution properly. */
inl(acpi_gbl_FADT.xpm_timer_block.address);
}
start_critical_timings();
}
/**
* acpi_idle_enter_c1 - enters an ACPI C1 state-type
* @dev: the target CPU
* @drv: cpuidle driver containing cpuidle state info
* @index: index of target state
*
* This is equivalent to the HALT instruction.
*/
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
ktime_t kt1, kt2;
s64 idle_time;
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
pr = __this_cpu_read(processors);
dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
local_irq_disable();
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
idle_time = ktime_to_us(ktime_sub(kt2, kt1));
/* Update device last_residency*/
dev->last_residency = (int)idle_time;
local_irq_enable();
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}
/**
* acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
* @dev: the target CPU
* @index: the index of suggested state
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ACPI_FLUSH_CPU_CACHE();
while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address);
/* See comment in acpi_idle_do_entry() */
inl(acpi_gbl_FADT.xpm_timer_block.address);
} else
return -ENODEV;
}
/* Never reached */
return 0;
}
/**
* acpi_idle_enter_simple - enters an ACPI state without BM handling
* @dev: the target CPU
* @drv: cpuidle driver with cpuidle state information
* @index: the index of suggested state
*/
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
pr = __this_cpu_read(processors);
dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
local_irq_disable();
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return -EINVAL;
}
}
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
kt1 = ktime_get_real();
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
/* Update device last_residency*/
dev->last_residency = (int)idle_time;
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
return index;
}
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @dev: the target CPU
* @drv: cpuidle driver containing state data
* @index: the index of suggested state
*
* If BM is detected, the deepest non-C3 idle state is entered instead.
*/
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
pr = __this_cpu_read(processors);
dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
if (drv->safe_state_index >= 0) {
return drv->states[drv->safe_state_index].enter(dev,
drv, drv->safe_state_index);
} else {
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
return -EINVAL;
}
}
local_irq_disable();
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we test
* NEED_RESCHED:
*/
smp_mb();
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
local_irq_enable();
return -EINVAL;
}
}
acpi_unlazy_tlb(smp_processor_id());
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
/*
* disable bus master
* bm_check implies we need ARB_DIS
* !bm_check implies we need cache flush
* bm_control implies whether we can do ARB_DIS
*
* That leaves a case where bm_check is set and bm_control is
* not set. In that case we cannot do much, we enter C3
* without doing anything.
*/
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
raw_spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
acpi_idle_do_entry(cx);
/* Re-enable bus master arbitration */
if (pr->flags.bm_check && pr->flags.bm_control) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
/* Update device last_residency*/
dev->last_residency = (int)idle_time;
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
cx->time += idle_time;
return index;
}
struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
};
/**
* acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
* device i.e. per-cpu data
*
* @pr: the ACPI processor
*/
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state_usage *state_usage;
struct cpuidle_device *dev = &pr->power.dev;
if (!pr->flags.power_setup_done)
return -EINVAL;
if (pr->flags.power == 0) {
return -EINVAL;
}
dev->cpu = pr->id;
if (max_cstate == 0)
max_cstate = 1;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
state_usage = &dev->states_usage[count];
if (!cx->valid)
continue;
#ifdef CONFIG_HOTPLUG_CPU
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
!pr->flags.has_cst &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
continue;
#endif
cpuidle_set_statedata(state_usage, cx);
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
dev->state_count = count;
if (!count)
return -EINVAL;
return 0;
}
/**
* acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
* global state data i.e. idle routines
*
* @pr: the ACPI processor
*/
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
if (!pr->flags.power_setup_done)
return -EINVAL;
if (pr->flags.power == 0)
return -EINVAL;
drv->safe_state_index = -1;
for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
if (max_cstate == 0)
max_cstate = 1;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
if (!cx->valid)
continue;
#ifdef CONFIG_HOTPLUG_CPU
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
!pr->flags.has_cst &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
continue;
#endif
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
if (cx->entry_method == ACPI_CSTATE_FFH)
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
break;
case ACPI_STATE_C2:
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_simple;
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
break;
case ACPI_STATE_C3:
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = pr->flags.bm_check ?
acpi_idle_enter_bm :
acpi_idle_enter_simple;
break;
}
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
drv->state_count = count;
if (!count)
return -EINVAL;
return 0;
}
int acpi_processor_hotplug(struct acpi_processor *pr)
{
int ret = 0;
if (disabled_by_idle_boot_param())
return 0;
if (!pr)
return -EINVAL;
if (nocst) {
return -ENODEV;
}
if (!pr->flags.power_setup_done)
return -ENODEV;
cpuidle_pause_and_lock();
cpuidle_disable_device(&pr->power.dev);
acpi_processor_get_power_info(pr);
if (pr->flags.power) {
acpi_processor_setup_cpuidle_cx(pr);
ret = cpuidle_enable_device(&pr->power.dev);
}
cpuidle_resume_and_unlock();
return ret;
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
int cpu;
struct acpi_processor *_pr;
if (disabled_by_idle_boot_param())
return 0;
if (!pr)
return -EINVAL;
if (nocst)
return -ENODEV;
if (!pr->flags.power_setup_done)
return -ENODEV;
/*
* FIXME: Design the ACPI notification to make it once per
* system instead of once per-cpu. This condition is a hack
* to make the code that updates C-States be called once.
*/
if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
cpuidle_pause_and_lock();
/* Protect against cpu-hotplug */
get_online_cpus();
/* Disable all cpuidle devices */
for_each_online_cpu(cpu) {
_pr = per_cpu(processors, cpu);
if (!_pr || !_pr->flags.power_setup_done)
continue;
cpuidle_disable_device(&_pr->power.dev);
}
/* Populate Updated C-state information */
acpi_processor_setup_cpuidle_states(pr);
/* Enable all cpuidle devices */
for_each_online_cpu(cpu) {
_pr = per_cpu(processors, cpu);
if (!_pr || !_pr->flags.power_setup_done)
continue;
acpi_processor_get_power_info(_pr);
if (_pr->flags.power) {
acpi_processor_setup_cpuidle_cx(_pr);
cpuidle_enable_device(&_pr->power.dev);
}
}
put_online_cpus();
cpuidle_resume_and_unlock();
}
return 0;
}
static int acpi_processor_registered;
int acpi_processor_power_init(struct acpi_processor *pr,
struct acpi_device *device)
{
acpi_status status = 0;
int retval;
static int first_run;
if (disabled_by_idle_boot_param())
return 0;
if (!first_run) {
dmi_check_system(processor_power_dmi_table);
max_cstate = acpi_processor_cstate_check(max_cstate);
if (max_cstate < ACPI_C_STATES_MAX)
printk(KERN_NOTICE
"ACPI: processor limited to max C-state %d\n",
max_cstate);
first_run++;
}
if (!pr)
return -EINVAL;
if (acpi_gbl_FADT.cst_control && !nocst) {
status =
acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Notifying BIOS of _CST ability failed"));
}
}
acpi_processor_get_power_info(pr);
pr->flags.power_setup_done = 1;
/*
* Install the idle handler if processor power management is supported.
* Note that the previously set idle handler will be used on
* platforms that only support C1.
*/
if (pr->flags.power) {
/* Register acpi_idle_driver if not already registered */
if (!acpi_processor_registered) {
acpi_processor_setup_cpuidle_states(pr);
retval = cpuidle_register_driver(&acpi_idle_driver);
if (retval)
return retval;
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
acpi_idle_driver.name);
}
/* Register per-cpu cpuidle_device. Cpuidle driver
* must already be registered before registering device
*/
acpi_processor_setup_cpuidle_cx(pr);
retval = cpuidle_register_device(&pr->power.dev);
if (retval) {
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
return retval;
}
acpi_processor_registered++;
}
return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
struct acpi_device *device)
{
if (disabled_by_idle_boot_param())
return 0;
if (pr->flags.power) {
cpuidle_unregister_device(&pr->power.dev);
acpi_processor_registered--;
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
}
pr->flags.power_setup_done = 0;
return 0;
}
| gpl-2.0 |
latlontude/linux | net/sunrpc/sched.c | 76 | 29489 | /*
* linux/net/sunrpc/sched.c
*
* Scheduling for synchronous and asynchronous RPC requests.
*
* Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
*
* TCP NFS related read + write fixes
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include "sunrpc.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY RPCDBG_SCHED
#endif
#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>
/*
* RPC slabs and memory pools
*/
#define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8)
static struct kmem_cache *rpc_task_slabp __read_mostly;
static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);
/*
* RPC tasks sit here while waiting for conditions to improve.
*/
static struct rpc_wait_queue delay_queue;
/*
* rpciod-related stuff
*/
struct workqueue_struct *rpciod_workqueue;
/*
* Disable the timer for a given RPC task. Should be called with
* queue->lock and bh_disabled in order to avoid races within
* rpc_run_timer().
*/
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (task->tk_timeout == 0)
return;
dprintk("RPC: %5u disabling timer\n", task->tk_pid);
task->tk_timeout = 0;
list_del(&task->u.tk_wait.timer_list);
if (list_empty(&queue->timer_list.list))
del_timer(&queue->timer_list.timer);
}
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
queue->timer_list.expires = expires;
mod_timer(&queue->timer_list.timer, expires);
}
/*
* Set up a timer for the current task.
*/
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (!task->tk_timeout)
return;
dprintk("RPC: %5u setting alarm for %lu ms\n",
task->tk_pid, task->tk_timeout * 1000 / HZ);
task->u.tk_wait.expires = jiffies + task->tk_timeout;
if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
rpc_set_queue_timer(queue, task->u.tk_wait.expires);
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
struct list_head *q = &queue->tasks[queue->priority];
struct rpc_task *task;
if (!list_empty(q)) {
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
if (task->tk_owner == queue->owner)
list_move_tail(&task->u.tk_wait.list, q);
}
}
static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
if (queue->priority != priority) {
/* Fairness: rotate the list when changing priority */
rpc_rotate_queue_owner(queue);
queue->priority = priority;
}
}
static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
queue->owner = pid;
queue->nr = RPC_BATCH_COUNT;
}
static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
rpc_set_waitqueue_owner(queue, 0);
}
/*
* Add new request to a priority queue.
*/
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
struct rpc_task *task,
unsigned char queue_priority)
{
struct list_head *q;
struct rpc_task *t;
INIT_LIST_HEAD(&task->u.tk_wait.links);
if (unlikely(queue_priority > queue->maxpriority))
queue_priority = queue->maxpriority;
if (queue_priority > queue->priority)
rpc_set_waitqueue_priority(queue, queue_priority);
q = &queue->tasks[queue_priority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
return;
}
}
list_add_tail(&task->u.tk_wait.list, q);
}
/*
* Add new request to wait queue.
*
* Swapper tasks always get inserted at the head of the queue.
* This should avoid many nasty memory deadlocks and hopefully
* improve overall performance.
* Everyone else gets appended to the queue to ensure proper FIFO behavior.
*/
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
struct rpc_task *task,
unsigned char queue_priority)
{
WARN_ON_ONCE(RPC_IS_QUEUED(task));
if (RPC_IS_QUEUED(task))
return;
if (RPC_IS_PRIORITY(queue))
__rpc_add_wait_queue_priority(queue, task, queue_priority);
else if (RPC_IS_SWAPPER(task))
list_add(&task->u.tk_wait.list, &queue->tasks[0]);
else
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
task->tk_waitqueue = queue;
queue->qlen++;
/* barrier matches the read in rpc_wake_up_task_queue_locked() */
smp_wmb();
rpc_set_queued(task);
dprintk("RPC: %5u added to queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
/*
* Remove request from a priority queue.
*/
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
struct rpc_task *t;
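/*
 * If other tasks from the same owner are chained off this one, promote
 * the first of them into this task's place on the queue and hand the
 * remaining chain over to it.
 */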
if (!list_empty(&task->u.tk_wait.links)) {
t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
}
}
/*
* Remove request from queue.
* Note: must be called with spin lock held.
*/
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
__rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
int i;
spin_lock_init(&queue->lock);
for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
INIT_LIST_HEAD(&queue->tasks[i]);
queue->maxpriority = nr_queues - 1;
rpc_reset_waitqueue_priority(queue);
queue->qlen = 0;
setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
INIT_LIST_HEAD(&queue->timer_list.list);
rpc_assign_waitqueue_name(queue, qname);
}
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(struct wait_bit_key *key)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
freezable_schedule_unsafe();
return 0;
}
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
static atomic_t rpc_pid;
task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif
static void rpc_set_active(struct rpc_task *task)
{
trace_rpc_task_begin(task->tk_client, task, NULL);
rpc_task_set_debuginfo(task);
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}
/*
* Mark an RPC call as having completed by clearing the 'active' bit
* and then waking up all tasks that were sleeping.
*/
static int rpc_complete_task(struct rpc_task *task)
{
void *m = &task->tk_runstate;
wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
unsigned long flags;
int ret;
trace_rpc_task_complete(task->tk_client, task, NULL);
spin_lock_irqsave(&wq->lock, flags);
clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
ret = atomic_dec_and_test(&task->tk_count);
if (waitqueue_active(wq))
__wake_up_locked_key(wq, TASK_NORMAL, &k);
spin_unlock_irqrestore(&wq->lock, flags);
return ret;
}
/*
* Allow callers to wait for completion of an RPC call
*
* Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
* to enforce taking of the wq->lock and hence avoid races with
* rpc_complete_task().
*/
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
if (action == NULL)
action = rpc_wait_bit_killable;
return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
* Note: If the task is ASYNC, and is being made runnable after sitting on an
* rpc_wait_queue, this must be called with the queue spinlock held to protect
* the wait queue operation.
* Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
* which is needed to ensure that __rpc_execute() doesn't loop (due to the
* lockless RPC_IS_QUEUED() test) before we've had a chance to test
* the RPC_TASK_RUNNING flag.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
bool need_wakeup = !rpc_test_and_set_running(task);
rpc_clear_queued(task);
if (!need_wakeup)
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
queue_work(rpciod_workqueue, &task->u.tk_work);
} else
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
/*
* Prepare for sleeping on a wait queue.
* By always appending tasks to the list we ensure FIFO behavior.
* NB: An RPC task will only receive interrupt-driven events as long
* as it's on a wait queue.
*/
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
struct rpc_task *task,
rpc_action action,
unsigned char queue_priority)
{
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
trace_rpc_task_sleep(task->tk_client, task, q);
__rpc_add_wait_queue(q, task, queue_priority);
WARN_ON_ONCE(task->tk_callback != NULL);
task->tk_callback = action;
__rpc_add_timer(q, task);
}
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
/* We shouldn't ever put an inactive task to sleep */
WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
if (!RPC_IS_ACTIVATED(task)) {
task->tk_status = -EIO;
rpc_put_task_async(task);
return;
}
/*
* Protect the queue operations.
*/
spin_lock_bh(&q->lock);
__rpc_sleep_on_priority(q, task, action, task->tk_priority);
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action, int priority)
{
/* We shouldn't ever put an inactive task to sleep */
WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
if (!RPC_IS_ACTIVATED(task)) {
task->tk_status = -EIO;
rpc_put_task_async(task);
return;
}
/*
* Protect the queue operations.
*/
spin_lock_bh(&q->lock);
__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
* @queue: wait queue
* @task: task to be woken up
*
* Caller must hold queue->lock, and have cleared the task queued flag.
*/
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
task->tk_pid, jiffies);
/* Has the task been executed yet? If not, we cannot wake it up! */
if (!RPC_IS_ACTIVATED(task)) {
printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
return;
}
trace_rpc_task_wakeup(task->tk_client, task, queue);
__rpc_remove_wait_queue(queue, task);
rpc_make_runnable(task);
dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
* Wake up a queued task while the queue lock is being held
*/
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (RPC_IS_QUEUED(task)) {
smp_rmb();
if (task->tk_waitqueue == queue)
__rpc_do_wake_up_task(queue, task);
}
}
/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
spin_lock_bh(&queue->lock);
rpc_wake_up_task_queue_locked(queue, task);
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
/*
* Wake up the next task on a priority queue.
*/
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
struct list_head *q;
struct rpc_task *task;
/*
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
if (queue->owner == task->tk_owner) {
if (--queue->nr)
goto out;
list_move_tail(&task->u.tk_wait.list, q);
}
/*
* Check if we need to switch queues.
*/
goto new_owner;
}
/*
* Service the next queue.
*/
do {
if (q == &queue->tasks[0])
q = &queue->tasks[queue->maxpriority];
else
q = q - 1;
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
goto new_queue;
}
} while (q != &queue->tasks[queue->priority]);
rpc_reset_waitqueue_priority(queue);
return NULL;
new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
return task;
}
static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
if (RPC_IS_PRIORITY(queue))
return __rpc_find_next_queued_priority(queue);
if (!list_empty(&queue->tasks[0]))
return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
return NULL;
}
/*
* Wake up the first task on the wait queue.
*/
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
bool (*func)(struct rpc_task *, void *), void *data)
{
struct rpc_task *task = NULL;
dprintk("RPC: wake_up_first(%p \"%s\")\n",
queue, rpc_qname(queue));
spin_lock_bh(&queue->lock);
task = __rpc_find_next_queued(queue);
if (task != NULL) {
if (func(task, data))
rpc_wake_up_task_queue_locked(queue, task);
else
task = NULL;
}
spin_unlock_bh(&queue->lock);
return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);
static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
return true;
}
/*
* Wake up the next task on the wait queue.
*/
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
/**
* rpc_wake_up - wake up all rpc_tasks
* @queue: rpc_wait_queue on which the tasks are sleeping
*
* Grabs queue->lock
*/
void rpc_wake_up(struct rpc_wait_queue *queue)
{
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
while (!list_empty(head)) {
struct rpc_task *task;
task = list_first_entry(head,
struct rpc_task,
u.tk_wait.list);
rpc_wake_up_task_queue_locked(queue, task);
}
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
/**
* rpc_wake_up_status - wake up all rpc_tasks and set their status value.
* @queue: rpc_wait_queue on which the tasks are sleeping
* @status: status value to set
*
* Grabs queue->lock
*/
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
while (!list_empty(head)) {
struct rpc_task *task;
task = list_first_entry(head,
struct rpc_task,
u.tk_wait.list);
task->tk_status = status;
rpc_wake_up_task_queue_locked(queue, task);
}
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
static void __rpc_queue_timer_fn(unsigned long ptr)
{
struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
struct rpc_task *task, *n;
unsigned long expires, now, timeo;
spin_lock(&queue->lock);
expires = now = jiffies;
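/*
 * Wake every task whose timeout has expired with -ETIMEDOUT, and track
 * the earliest remaining expiry so the queue timer can be re-armed below.
 */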
list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
timeo = task->u.tk_wait.expires;
if (time_after_eq(now, timeo)) {
dprintk("RPC: %5u timeout\n", task->tk_pid);
task->tk_status = -ETIMEDOUT;
rpc_wake_up_task_queue_locked(queue, task);
continue;
}
if (expires == now || time_after(expires, timeo))
expires = timeo;
}
if (!list_empty(&queue->timer_list.list))
rpc_set_queue_timer(queue, expires);
spin_unlock(&queue->lock);
}
static void __rpc_atrun(struct rpc_task *task)
{
if (task->tk_status == -ETIMEDOUT)
task->tk_status = 0;
}
/*
* Run a task at a later time
*/
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
task->tk_timeout = delay;
rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
static void
rpc_init_task_statistics(struct rpc_task *task)
{
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
task->tk_rebind_retry = 2;
/* starting timestamp */
task->tk_start = ktime_get();
}
static void
rpc_reset_task_statistics(struct rpc_task *task)
{
task->tk_timeouts = 0;
task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
rpc_init_task_statistics(task);
}
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
void rpc_exit_task(struct rpc_task *task)
{
task->tk_action = NULL;
if (task->tk_ops->rpc_call_done != NULL) {
task->tk_ops->rpc_call_done(task, task->tk_calldata);
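/*
 * rpc_call_done may have restarted the task by setting a new
 * tk_action; in that case drop the transport slot and reset the
 * statistics before it runs again.
 */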
if (task->tk_action != NULL) {
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
rpc_reset_task_statistics(task);
}
}
}
void rpc_exit(struct rpc_task *task, int status)
{
task->tk_status = status;
task->tk_action = rpc_exit_task;
if (RPC_IS_QUEUED(task))
rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
if (ops->rpc_release != NULL)
ops->rpc_release(calldata);
}
/*
* This is the RPC `scheduler' (or rather, the finite state machine).
*/
static void __rpc_execute(struct rpc_task *task)
{
struct rpc_wait_queue *queue;
int task_is_async = RPC_IS_ASYNC(task);
int status = 0;
dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
task->tk_pid, task->tk_flags);
WARN_ON_ONCE(RPC_IS_QUEUED(task));
if (RPC_IS_QUEUED(task))
return;
for (;;) {
void (*do_action)(struct rpc_task *);
/*
* Execute any pending callback first.
*/
do_action = task->tk_callback;
task->tk_callback = NULL;
if (do_action == NULL) {
/*
* Perform the next FSM step.
* tk_action may be NULL if the task has been killed.
* In particular, note that rpc_killall_tasks may
* do this at any time, so beware when dereferencing.
*/
do_action = task->tk_action;
if (do_action == NULL)
break;
}
trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
do_action(task);
/*
* Lockless check for whether task is sleeping or not.
*/
if (!RPC_IS_QUEUED(task))
continue;
/*
* The queue->lock protects against races with
* rpc_make_runnable().
*
* Note that once we clear RPC_TASK_RUNNING on an asynchronous
* rpc_task, rpc_make_runnable() can assign it to a
* different workqueue. We therefore cannot assume that the
* rpc_task pointer can still be safely dereferenced.
*/
queue = task->tk_waitqueue;
spin_lock_bh(&queue->lock);
if (!RPC_IS_QUEUED(task)) {
spin_unlock_bh(&queue->lock);
continue;
}
rpc_clear_running(task);
spin_unlock_bh(&queue->lock);
if (task_is_async)
return;
/* sync task: sleep here */
dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
status = out_of_line_wait_on_bit(&task->tk_runstate,
RPC_TASK_QUEUED, rpc_wait_bit_killable,
TASK_KILLABLE);
if (status == -ERESTARTSYS) {
/*
* When a sync task receives a signal, it exits with
* -ERESTARTSYS. In order to catch any callbacks that
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
dprintk("RPC: %5u got signal\n", task->tk_pid);
task->tk_flags |= RPC_TASK_KILLED;
rpc_exit(task, -ERESTARTSYS);
}
dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
}
dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
task->tk_status);
/* Release all resources associated with the task */
rpc_release_task(task);
}
/*
* User-visible entry point to the scheduler.
*
* This may be called recursively if e.g. an async NFS task updates
* the attributes and finds that dirty pages must be flushed.
* NOTE: Upon exit of this function the task is guaranteed to be
* released. In particular note that tk_release() will have
* been called, so your task memory may have been freed.
*/
void rpc_execute(struct rpc_task *task)
{
bool is_async = RPC_IS_ASYNC(task);
rpc_set_active(task);
rpc_make_runnable(task);
if (!is_async)
__rpc_execute(task);
}
static void rpc_async_schedule(struct work_struct *work)
{
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
/**
* rpc_malloc - allocate an RPC buffer
* @task: RPC task that will use this buffer
* @size: requested byte size
*
* To prevent rpciod from hanging, this allocator never sleeps,
* returning NULL and suppressing the warning if the request cannot be serviced
* immediately.
* The caller can arrange to sleep in a way that is safe for rpciod.
*
* Most requests are 'small' (under 2KiB) and can be serviced from a
* mempool, ensuring that NFS reads and writes can always proceed,
* and that there is good locality of reference for these buffers.
*
* In order to avoid memory starvation triggering more writebacks of
* NFS requests, we avoid using GFP_KERNEL.
*/
void *rpc_malloc(struct rpc_task *task, size_t size)
{
struct rpc_buffer *buf;
gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
if (RPC_IS_SWAPPER(task))
gfp |= __GFP_MEMALLOC;
size += sizeof(struct rpc_buffer);
if (size <= RPC_BUFFER_MAXSIZE)
buf = mempool_alloc(rpc_buffer_mempool, gfp);
else
buf = kmalloc(size, gfp);
if (!buf)
return NULL;
buf->len = size;
dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
task->tk_pid, size, buf);
return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
/**
* rpc_free - free buffer allocated via rpc_malloc
* @buffer: buffer to free
*
*/
void rpc_free(void *buffer)
{
size_t size;
struct rpc_buffer *buf;
if (!buffer)
return;
buf = container_of(buffer, struct rpc_buffer, data);
size = buf->len;
dprintk("RPC: freeing buffer of size %zu at %p\n",
size, buf);
if (size <= RPC_BUFFER_MAXSIZE)
mempool_free(buf, rpc_buffer_mempool);
else
kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
/*
* Creation and deletion of RPC task structures
*/
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
memset(task, 0, sizeof(*task));
atomic_set(&task->tk_count, 1);
task->tk_flags = task_setup_data->flags;
task->tk_ops = task_setup_data->callback_ops;
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
/* Initialize workqueue for async tasks */
task->tk_workqueue = task_setup_data->workqueue;
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
rpc_init_task_statistics(task);
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
}
static struct rpc_task *
rpc_alloc_task(void)
{
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}
/*
* Create a new task for the specified client.
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
struct rpc_task *task = setup_data->task;
unsigned short flags = 0;
if (task == NULL) {
task = rpc_alloc_task();
if (task == NULL) {
rpc_release_calldata(setup_data->callback_ops,
setup_data->callback_data);
return ERR_PTR(-ENOMEM);
}
flags = RPC_TASK_DYNAMIC;
}
rpc_init_task(task, setup_data);
task->tk_flags |= flags;
dprintk("RPC: allocated task %p\n", task);
return task;
}
/*
* rpc_free_task - release rpc task and perform cleanups
*
* Note that we free up the rpc_task _after_ rpc_release_calldata()
* in order to work around a workqueue dependency issue.
*
* Tejun Heo states:
* "Workqueue currently considers two work items to be the same if they're
* on the same address and won't execute them concurrently - ie. it
* makes a work item which is queued again while being executed wait
* for the previous execution to complete.
*
* If a work function frees the work item, and then waits for an event
* which should be performed by another work item and *that* work item
* recycles the freed work item, it can create a false dependency loop.
* There really is no reliable way to detect this short of verifying
* every memory free."
*
*/
static void rpc_free_task(struct rpc_task *task)
{
unsigned short tk_flags = task->tk_flags;
rpc_release_calldata(task->tk_ops, task->tk_calldata);
if (tk_flags & RPC_TASK_DYNAMIC) {
dprintk("RPC: %5u freeing task\n", task->tk_pid);
mempool_free(task, rpc_task_mempool);
}
}
static void rpc_async_release(struct work_struct *work)
{
rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}
static void rpc_release_resources_task(struct rpc_task *task)
{
xprt_release(task);
if (task->tk_msg.rpc_cred) {
put_rpccred(task->tk_msg.rpc_cred);
task->tk_msg.rpc_cred = NULL;
}
rpc_task_release_client(task);
}
static void rpc_final_put_task(struct rpc_task *task,
struct workqueue_struct *q)
{
if (q != NULL) {
INIT_WORK(&task->u.tk_work, rpc_async_release);
queue_work(q, &task->u.tk_work);
} else
rpc_free_task(task);
}
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
if (atomic_dec_and_test(&task->tk_count)) {
rpc_release_resources_task(task);
rpc_final_put_task(task, q);
}
}
void rpc_put_task(struct rpc_task *task)
{
rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);
void rpc_put_task_async(struct rpc_task *task)
{
rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);
static void rpc_release_task(struct rpc_task *task)
{
dprintk("RPC: %5u release task\n", task->tk_pid);
WARN_ON_ONCE(RPC_IS_QUEUED(task));
rpc_release_resources_task(task);
/*
* Note: at this point we have been removed from rpc_clnt->cl_tasks,
* so it should be safe to use task->tk_count as a test for whether
* or not any other processes still hold references to our rpc_task.
*/
if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
/* Wake up anyone who may be waiting for task completion */
if (!rpc_complete_task(task))
return;
} else {
if (!atomic_dec_and_test(&task->tk_count))
return;
}
rpc_final_put_task(task, task->tk_workqueue);
}
int rpciod_up(void)
{
return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}
void rpciod_down(void)
{
module_put(THIS_MODULE);
}
/*
* Start up the rpciod workqueue.
*/
static int rpciod_start(void)
{
struct workqueue_struct *wq;
/*
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}
static void rpciod_stop(void)
{
struct workqueue_struct *wq = NULL;
if (rpciod_workqueue == NULL)
return;
dprintk("RPC: destroying workqueue rpciod\n");
wq = rpciod_workqueue;
rpciod_workqueue = NULL;
destroy_workqueue(wq);
}
void
rpc_destroy_mempool(void)
{
rpciod_stop();
if (rpc_buffer_mempool)
mempool_destroy(rpc_buffer_mempool);
if (rpc_task_mempool)
mempool_destroy(rpc_task_mempool);
if (rpc_task_slabp)
kmem_cache_destroy(rpc_task_slabp);
if (rpc_buffer_slabp)
kmem_cache_destroy(rpc_buffer_slabp);
rpc_destroy_wait_queue(&delay_queue);
}
int
rpc_init_mempool(void)
{
/*
* The following is not strictly a mempool initialisation,
* but there is no harm in doing it here
*/
rpc_init_wait_queue(&delay_queue, "delayq");
if (!rpciod_start())
goto err_nomem;
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
return 0;
err_nomem:
rpc_destroy_mempool();
return -ENOMEM;
}
| gpl-2.0 |
heesub/linux | drivers/crypto/caam/error.c | 76 | 9071 | /*
* CAAM Error Reporting
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*/
#include "compat.h"
#include "regs.h"
#include "desc.h"
#include "error.h"
static const struct {
u8 value;
const char *error_text;
} desc_error_list[] = {
{ 0x00, "No error." },
{ 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
{ 0x02, "SGT Null Entry Error." },
{ 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
{ 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
{ 0x05, "Reserved." },
{ 0x06, "Invalid KEY Command" },
{ 0x07, "Invalid LOAD Command" },
{ 0x08, "Invalid STORE Command" },
{ 0x09, "Invalid OPERATION Command" },
{ 0x0A, "Invalid FIFO LOAD Command" },
{ 0x0B, "Invalid FIFO STORE Command" },
{ 0x0C, "Invalid MOVE/MOVE_LEN Command" },
{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
{ 0x0E, "Invalid MATH Command" },
{ 0x0F, "Invalid SIGNATURE Command" },
{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
{ 0x12, "Shared Descriptor Header Error" },
{ 0x13, "Header Error. Invalid length or parity, or certain other problems." },
{ 0x14, "Burster Error. Burster has gotten to an illegal state" },
{ 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
{ 0x16, "DMA Error" },
{ 0x17, "Reserved." },
{ 0x1A, "Job failed due to JR reset" },
{ 0x1B, "Job failed due to Fail Mode" },
{ 0x1C, "DECO Watchdog timer timeout error" },
{ 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
{ 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
{ 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
{ 0x20, "DECO has completed a reset initiated via the DRR register" },
{ 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
{ 0x23, "Read Input Frame error" },
{ 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
{ 0x80, "DNR (do not run) error" },
{ 0x81, "undefined protocol command" },
{ 0x82, "invalid setting in PDB" },
{ 0x83, "Anti-replay LATE error" },
{ 0x84, "Anti-replay REPLAY error" },
{ 0x85, "Sequence number overflow" },
{ 0x86, "Sigver invalid signature" },
{ 0x87, "DSA Sign Illegal test descriptor" },
{ 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
{ 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
{ 0xC1, "Blob Command error: Undefined mode" },
{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
{ 0xC4, "Blob Command error: Black Blob key or input size error" },
{ 0xC5, "Blob Command error: Invalid key destination" },
{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
{ 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
static const char * const cha_id_list[] = {
"",
"AES",
"DES",
"ARC4",
"MDHA",
"RNG",
"SNOW f8",
"Kasumi f8/9",
"PKHA",
"CRCA",
"SNOW f9",
"ZUCE",
"ZUCA",
};
static const char * const err_id_list[] = {
"No error.",
"Mode error.",
"Data size error.",
"Key size error.",
"PKHA A memory size error.",
"PKHA B memory size error.",
"Data arrived out of sequence error.",
"PKHA divide-by-zero error.",
"PKHA modulus even error.",
"DES key parity error.",
"ICV check failed.",
"Hardware error.",
"Unsupported CCM AAD size.",
"Class 1 CHA is not reset",
"Invalid CHA combination was selected",
"Invalid CHA selected.",
};
static const char * const rng_err_id_list[] = {
"",
"",
"",
"Instantiate",
"Not instantiated",
"Test instantiate",
"Prediction resistance",
"Prediction resistance and test request",
"Uninstantiate",
"Secure key generation",
};
static void report_ccb_status(struct device *jrdev, const u32 status,
const char *error)
{
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
JRSTA_CCBERR_CHAID_SHIFT;
u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
JRSTA_DECOERR_INDEX_SHIFT;
char *idx_str;
const char *cha_str = "unidentified cha_id value 0x";
char cha_err_code[3] = { 0 };
const char *err_str = "unidentified err_id value 0x";
char err_err_code[3] = { 0 };
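/*
 * Decode the CHA id and error id fields of the status word.  An
 * unrecognized CHA id is reported as a raw two-digit hex code, and
 * RNG-specific error ids get their own string table below.
 */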
if (status & JRSTA_DECOERR_JUMP)
idx_str = "jump tgt desc idx";
else
idx_str = "desc idx";
if (cha_id < ARRAY_SIZE(cha_id_list))
cha_str = cha_id_list[cha_id];
else
snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
err_id < ARRAY_SIZE(rng_err_id_list) &&
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
} else {
err_str = err_id_list[err_id];
}
/*
* CCB ICV check failures are part of normal operation life;
* we leave the upper layers to do what they want with them.
*/
if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
status, error, idx_str, idx,
cha_str, cha_err_code,
err_str, err_err_code);
}
static void report_jump_status(struct device *jrdev, const u32 status,
const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
}
static void report_deco_status(struct device *jrdev, const u32 status,
const char *error)
{
u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
JRSTA_DECOERR_INDEX_SHIFT;
char *idx_str;
const char *err_str = "unidentified error value 0x";
char err_err_code[3] = { 0 };
int i;
if (status & JRSTA_DECOERR_JUMP)
idx_str = "jump tgt desc idx";
else
idx_str = "desc idx";
for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
if (desc_error_list[i].value == err_id)
break;
if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
err_str = desc_error_list[i].error_text;
else
snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
status, error, idx_str, idx, err_str, err_err_code);
}
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
}
static void report_cond_code_status(struct device *jrdev, const u32 status,
const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
}
void caam_jr_strstatus(struct device *jrdev, u32 status)
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
const char *error);
const char *error;
} status_src[16] = {
{ NULL, "No error" },
{ NULL, NULL },
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
{ NULL, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
{ NULL, NULL },
{ NULL, NULL },
{ NULL, NULL },
{ NULL, NULL },
{ NULL, NULL },
{ NULL, NULL },
{ NULL, NULL },
};
u32 ssrc = status >> JRSTA_SSRC_SHIFT;
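/*
 * The error-source field in the upper bits of the status word indexes
 * the 16-entry dispatch table above.
 */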
const char *error = status_src[ssrc].error;
/*
* If there is an error handling function, call it to report the error.
* Otherwise print the error source name.
*/
if (status_src[ssrc].report_ssed)
status_src[ssrc].report_ssed(jrdev, status, error);
else if (error)
dev_err(jrdev, "%d: %s\n", ssrc, error);
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
EXPORT_SYMBOL(caam_jr_strstatus);
| gpl-2.0 |
bq/aquaris-E4 | drivers/misc/mediatek/hdmi/Sii8338/si_drv_mdt_tx.c | 76 | 14011 | /*
Silicon Image Driver Extension
Copyright (C) 2012 Silicon Image Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation version 2.
This program is distributed "as is" WITHOUT ANY WARRANTY of any
kind, whether express or implied; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/* !file si_drv_mdt_tx.c */
/* !brief Silicon Image implementation */
/* */
#ifdef MDT_SUPPORT
#include "si_common.h"
#ifndef __KERNEL__
#include "hal_timers.h"
#endif /* not defined __KERNEL__ */
#include "sii_hal.h"
#include "si_cra.h"
#include "si_cra_cfg.h"
#include "si_mhl_defs.h"
#include "si_mhl_tx_api.h"
#include "si_mhl_tx_base_drv_api.h"
#include "si_drv_mdt_tx.h"
#include "si_mdt_inputdev.h"
#include "../../platform/hal/sii_hal_priv.h"
#ifdef MDT_SUPPORT
unsigned char g_mdt_is_ready = 0;
struct device *g_mdt_dev = 0;
struct class *g_mdt_class = 0;
static struct g_mdt_t {
struct i2c_client *i2c_cbus_client;
unsigned char is_ready;
} g_mdt = {
.i2c_cbus_client = 0, .is_ready = 0,};
#endif
/* #define mdt_i2c_cbus_read_block() */
/* #define mdt_i2c_cbus_write_block() */
static void mdt_cbus_read_block(unsigned char reg_offset, unsigned char byte_count,
unsigned char *data)
{
unsigned char ret;
#if 0
struct i2c_msg i2cMsg[2];
i2cMsg[0].addr = (0xC8 >> 1);
i2cMsg[0].flags = 0;
i2cMsg[0].len = 1;
i2cMsg[0].buf = ®_offset;
i2cMsg[1].addr = (0xC8 >> 1);
i2cMsg[1].flags = 1;
i2cMsg[1].len = byte_count;
i2cMsg[1].buf = data;
if (g_mdt.i2c_cbus_client == 0) {
return;
}
ret = i2c_transfer(g_mdt.i2c_cbus_client->adapter, i2cMsg, 2);
if (ret < 0) {
return;
}
#else
int32_t status;
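/*
 * Temporarily retarget the shared I2C client at the CBUS slave address
 * (0xC8 >> 1) for this transfer; the original address is restored below.
 */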
u32 client_main_addr = g_mdt.i2c_cbus_client->addr;
g_mdt.i2c_cbus_client->addr = 0xC8 >> 1;
/* set speed, panjie */
g_mdt.i2c_cbus_client->ext_flag |= I2C_HS_FLAG;
/* */
g_mdt.i2c_cbus_client->ext_flag |= I2C_DIRECTION_FLAG;
status = i2c_master_send(g_mdt.i2c_cbus_client, (const char *)®_offset, 1);
if (status < 0) {
printk(" mdt_cbus_read_block,%d send error\n", __LINE__);
}
/* set speed, panjie */
g_mdt.i2c_cbus_client->ext_flag |= I2C_HS_FLAG;
/* */
status = i2c_master_recv(g_mdt.i2c_cbus_client, data, byte_count);
g_mdt.i2c_cbus_client->addr = client_main_addr;
#endif
#ifdef I2C_ANALYZER
MHL_log_event(I2C_BLOCK_0x73, reg_offset, data[0]);
#endif
}
static void mdt_cbus_write_block(unsigned char reg_offset, unsigned char byte_count,
unsigned char *data)
{
unsigned char ret;
unsigned char buffer[0x0F + 1];
if (byte_count > 0x0F)
return;
buffer[0] = reg_offset;
memcpy(&buffer[1], data, byte_count);
ret = i2c_master_send(g_mdt.i2c_cbus_client, buffer, (byte_count + 1));
if (ret < 0) {
return;
}
#ifdef I2C_ANALYZER
MHL_log_event(I2C_BLOCK_0x72, reg_offset, data[0]);
#endif
}
static void mdt_cbus_write_byte(unsigned char reg_offset, unsigned char value)
{
mdt_cbus_write_block(reg_offset, 1, &value);
}
static unsigned char mdt_cbus_read_byte(unsigned char reg_offset)
{
unsigned char ret;
mdt_cbus_read_block(reg_offset, 1, &ret);
return ret;
}
#ifdef MDT_SUPPORT_DEBUG
struct input_event g_events[15000];
struct input_event *g_e;
static void init_log_file(void);
static void deregster_log_file(void);
#endif
struct msc_request g_prior_msc_request = { 0, 0 };
extern struct si_mdt_inputdevs_t mdt;
static uint8_t sii8338_parse_received_burst_for_mdt(union mdt_event_t *mdt_packet,
SiiReg_t scratchpad_offset)
{
memset(mdt_packet->bytes, 0x0, 0x0F);
mdt_cbus_read_block(scratchpad_offset, MDT_MIN_PACKET_LENGTH, mdt_packet->bytes);
if (mdt_packet->header.isHID == 0)
return 0xFF;
if ((mdt_packet->header.isKeyboard) || (mdt_packet->header.isNotLast)
|| (mdt_packet->event_mouse.header.isNotMouse))
mdt_cbus_read_block(scratchpad_offset + MDT_MIN_PACKET_LENGTH,
MDT_KEYBOARD_PACKET_TAIL_LENGTH,
(mdt_packet->bytes + MDT_MIN_PACKET_LENGTH));
printk(KERN_ERR "MHL data: %x %x %x %x %x %x %x %x %x\n",
mdt_packet->bytes[0],
mdt_packet->bytes[1],
mdt_packet->bytes[2],
mdt_packet->bytes[3],
mdt_packet->bytes[4],
mdt_packet->bytes[5],
mdt_packet->bytes[6], mdt_packet->bytes[7], mdt_packet->bytes[8]);
printk(KERN_ERR "MHL info: %x %x %x\n",
mdt_packet->header.isKeyboard,
mdt_packet->event_cursor.header.touch.isNotMouse,
mdt_packet->event_cursor.body.suffix.isGame);
if (mdt_packet->header.isKeyboard) {
mdt_generate_event_keyboard(&(mdt_packet->event_keyboard));
} else if (mdt_packet->event_cursor.header.touch.isNotMouse == 0) {
mdt_generate_event_mouse(&(mdt_packet->event_mouse));
} else if (mdt_packet->event_cursor.body.suffix.isGame == 0) {
mdt_generate_event_touchscreen(&(mdt_packet->event_cursor), 1);
} else {
mdt_generate_event_gamepad(&(mdt_packet->event_cursor));
}
return 0;
}
static void sii8338_msc_req_for_mdt(uint8_t req_type, uint8_t offset, uint8_t first_data)
{
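/*
 * Skip redundant CBUS writes: only the command offset/data registers
 * whose cached values changed are rewritten (via one block write when
 * both changed), then the request type register is written, which is
 * what appears to issue the MSC command on this part.
 */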
if ((offset != g_prior_msc_request.offset) &&
(first_data != g_prior_msc_request.first_data)) {
g_prior_msc_request.offset = offset;
g_prior_msc_request.first_data = first_data;
mdt_cbus_write_block(0x13, 2, (uint8_t *) &g_prior_msc_request);
} else if (offset != g_prior_msc_request.offset) {
g_prior_msc_request.offset = offset;
SiiRegWrite(TX_PAGE_CBUS | 0x0013, g_prior_msc_request.offset);
} else if (first_data != g_prior_msc_request.first_data) {
g_prior_msc_request.first_data = first_data;
SiiRegWrite(TX_PAGE_CBUS | 0x0014, g_prior_msc_request.first_data);
}
SiiRegWrite(TX_PAGE_CBUS | 0x0012, req_type);
}
uint8_t sii8338_irq_for_mdt(enum mdt_state *mdt_state)
{
uint8_t ret = 0;
uint8_t intr;
union mdt_event_t mdt_packet; /* Should not use typedef. Change this in the future. */
MHL_log_event(ISR_MDT_BEGIN, 0, *mdt_state);
mdt_init();
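/*
 * Write-burst handshake, advanced one step per interrupt:
 * WAIT_FOR_REQ_WRT: the peer raised REQ_WRT, so grant the write (GRT_WRT);
 * WAIT_FOR_GRT_WRT_COMPLETE: wait for our SET_INT command to complete;
 * WAIT_FOR_WRITE_BURST_COMPLETE: the burst has landed in the scratchpad,
 * so parse it as an MDT input packet and re-arm for the next request.
 */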
switch (*mdt_state) {
case WAIT_FOR_REQ_WRT:
intr = SiiRegRead(TX_PAGE_CBUS | 0x00A0);
MHL_log_event(ISR_WRITEBURST_CAUGHT, TX_PAGE_CBUS | 0x00A0, intr);
if (intr & MHL_INT_REQ_WRT) {
sii8338_msc_req_for_mdt(0x01 << 3, OFFSET_SET_INT, GRT_WRT);
SiiRegWrite(TX_PAGE_CBUS | 0x00A0, (MHL_INT_REQ_WRT | MHL_INT_DSCR_CHG));
ret = MDT_EVENT_HANDLED;
*mdt_state = WAIT_FOR_GRT_WRT_COMPLETE;
}
break;
case WAIT_FOR_GRT_WRT_COMPLETE:
intr = SiiRegRead(TX_PAGE_CBUS | 0x0008);
MHL_log_event(ISR_WRITEBURST_CAUGHT, TX_PAGE_CBUS | 0x0008, intr);
if (intr & BIT4) {
SiiRegWrite(TX_PAGE_CBUS | 0x0008, BIT4);
ret = MDT_EVENT_HANDLED;
*mdt_state = WAIT_FOR_WRITE_BURST_COMPLETE;
}
break;
case WAIT_FOR_WRITE_BURST_COMPLETE:
intr = SiiRegRead(TX_PAGE_CBUS | 0x001E);
MHL_log_event(ISR_WRITEBURST_CAUGHT, TX_PAGE_CBUS | 0x001E, intr);
if (intr & BIT0) {
sii8338_parse_received_burst_for_mdt(&mdt_packet, TX_PAGE_CBUS | 0x00C2);
if (mdt_packet.header.isNotLast)
sii8338_parse_received_burst_for_mdt(&mdt_packet,
TX_PAGE_CBUS | 0x00C9);
SiiRegWrite(TX_PAGE_CBUS | 0x001E, BIT0);
ret = MDT_EVENT_HANDLED;
*mdt_state = WAIT_FOR_REQ_WRT;
}
break;
case IDLE:
default:
break;
}
MHL_log_event(ISR_MDT_END, 0xFF, 0xFF);
return ret;
}
void mdt_init(void)
{
if (g_mdt_is_ready != 0)
return;
if (gMhlDevice.pI2cClient == 0)
return;
g_mdt.i2c_cbus_client = gMhlDevice.pI2cClient;
#ifdef MDT_SUPPORT_DEBUG
init_log_file();
#endif
mdt_input_init();
g_mdt_is_ready = 1;
}
void mdt_deregister(void)
{
if (g_mdt_is_ready == 0)
return;
#ifdef MDT_SUPPORT_DEBUG
deregster_log_file();
#endif
mdt_input_deregister();
g_mdt_is_ready = 0;
}
#ifdef MDT_SUPPORT_DEBUG
static void init_events(void)
{
int wevent;
g_e = g_events;
for (wevent = (int)0; wevent < (int)ARRAY_SIZE(g_events); wevent++)
g_events[wevent].type = (int)-1;
}
inline void MHL_log_event(int type, int code, int value)
{
printk(KERN_ERR "MDT %x %x %x\n", type, code, value);
/*
struct input_event *e = g_e;
if (g_mdt_is_ready == 0)
return;
do_gettimeofday(&(e->time));
e->type = type;
e->code = code;
e->value = value;
if ((++e - g_events) >= ARRAY_SIZE(g_events))
e = g_events;
e->type = -1;
g_e = e;
*/
}
struct input_event *for_each_valid_event(struct input_event *e)
{
struct input_event *estart = e++;
if ((e - g_events) >= ARRAY_SIZE(g_events))
e = g_events;
while (e->type == (__u16) -1 && e != estart) {
e++;
if ((e - g_events) >= ARRAY_SIZE(g_events))
e = g_events;
}
if (e == estart || e->type == (__u16) -1)
return NULL;
return e;
}
#endif
/* This part always compiles to support conditional debug control */
#ifdef MDT_SUPPORT_DEBUG
static ssize_t show_events(struct device *dev, struct device_attribute *attr, char *buf)
{
struct input_event *e = g_e;
int ret = 0;
if (g_mdt_is_ready == 0)
return 0;
while ((e = for_each_valid_event(e)) != NULL) {
ret += sprintf(buf + ret, "%8lu.%06lu,", e->time.tv_sec, e->time.tv_usec);
switch (e->type) {
case IRQ_HEARTBEAT:
ret += sprintf(buf + ret, "%s", "IRQ_HEARTBEAT,");
break;
case IRQ_WAKE:
ret += sprintf(buf + ret, "%s", "IRQ_WAKE,");
break;
case IRQ_RECEIVED:
ret += sprintf(buf + ret, "%s", "IRQ_RECEIVED,");
break;
case ISR_WRITEBURST_CAUGHT:
ret += sprintf(buf + ret, "%s", "ISR_WRITEBURST_CAUGHT,");
break;
case ISR_WRITEBURST_MISSED:
ret += sprintf(buf + ret, "%s", "ISR_WRITEBURST_MISSED,");
break;
case ISR_DEFFER_SCHEDULED:
ret += sprintf(buf + ret, "%s", "ISR_DEFFER_SCHEDULED,");
break;
case ISR_DEFFER_NO:
ret += sprintf(buf + ret, "%s", "ISR_DEFFER_NO,");
break;
case ISR_DEFFER_BEGIN:
ret += sprintf(buf + ret, "%s", "ISR_DEFFER_BEGIN,");
break;
case ISR_DEFFER_END:
ret += sprintf(buf + ret, "%s", "ISR_DEFFER_END,");
break;
case I2C_BLOCK_R_UNKNOWN:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_UNKNOWN_R,");
break;
case I2C_BLOCK_W_UNKNOWN:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_UNKNOWN_W,");
break;
case I2C_BLOCK_0x72:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0x72,");
break;
case I2C_BLOCK_0x7A:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0x7A,");
break;
case I2C_BLOCK_0x92:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0x92,");
break;
case I2C_BLOCK_0xC8:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0xC8,");
break;
case I2C_BLOCK_0x73:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0x73,");
break;
case I2C_BLOCK_0x7B:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0x7B,");
break;
case I2C_BLOCK_0x93:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0x93,");
break;
case I2C_BLOCK_0xC9:
ret += sprintf(buf + ret, "%s", "I2C_BLOCK_0xC9,");
break;
case I2C_BYTE_0x72:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0x72,");
break;
case I2C_BYTE_0x7A:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0x7A,");
break;
case I2C_BYTE_0x92:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0x92,");
break;
case I2C_BYTE_0xC8:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0xC8,");
break;
case I2C_BYTE_0x73:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0x73,");
break;
case I2C_BYTE_0x7B:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0x7B,");
break;
case I2C_BYTE_0x93:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0x93,");
break;
case I2C_BYTE_0xC9:
ret += sprintf(buf + ret, "%s", "I2C_BYTE_0xC9,");
break;
case ISR_THREADED_BEGIN:
ret += sprintf(buf + ret, "%s", "ISR_THREADED_BEGIN,");
break;
case ISR_THREADED_END:
ret += sprintf(buf + ret, "%s", "ISR_THREADED_END,");
break;
case ISR_MDT_BEGIN:
ret += sprintf(buf + ret, "%s", "ISR_MDT_BEGIN,");
break;
case ISR_MDT_END:
ret += sprintf(buf + ret, "%s", "ISR_MDT_END,");
break;
case ISR_FULL_BEGIN:
ret += sprintf(buf + ret, "%s", "ISR_FULL_BEGIN,");
break;
case ISR_FULL_END:
ret += sprintf(buf + ret, "%s", "ISR_FULL_END,");
break;
case TOUCHPAD_BEGIN:
ret += sprintf(buf + ret, "%s", "TOUCHPAD_BEGIN,");
break;
case TOUCHPAD_END:
ret += sprintf(buf + ret, "%s", "TOUCHPAD_END,");
break;
case MHL_ESTABLISHED:
ret += sprintf(buf + ret, "%s", "MHL_ESTABLISHED,");
break;
case MSC_READY:
ret += sprintf(buf + ret, "%s", "MSC_READY,");
break;
case MDT_EVENT_PARSED:
ret += sprintf(buf + ret, "%s", "MDT_EVENT_PARSED,");
break;
case MDT_UNLOCK:
ret += sprintf(buf + ret, "%s", "MDT_UNLOCK,");
break;
case MDT_LOCK:
ret += sprintf(buf + ret, "%s", "MDT_LOCK,");
break;
case MDT_DISCOVER_REQ:
ret += sprintf(buf + ret, "%s", "MDT_DISCOVERY_REQ,");
break;
}
ret += sprintf(buf + ret, "%02x,%02x,%02x\n", e->type, e->code, e->value);
e->type = -1;
if (ret > (PAGE_SIZE - 512))
return ret;
}
return ret;
}
static DEVICE_ATTR(MDT_file, S_IRUGO, show_events, NULL);
static void deregster_log_file(void)
{
if (g_mdt_is_ready == 0)
return;
if (g_mdt_class == 0)
return;
if (g_mdt_dev == 0)
return;
device_destroy(g_mdt_class, 0);
class_destroy(g_mdt_class);
g_mdt_class = 0;
g_mdt_dev = 0;
}
static void init_log_file(void)
{
if (g_mdt_is_ready != 0)
return;
if (g_mdt_class != 0)
return;
if (g_mdt_dev != 0)
return;
init_events();
g_mdt_is_ready = 1;
g_mdt_class = class_create(THIS_MODULE, "mdt");
if (IS_ERR(g_mdt_class)) {
printk(KERN_ERR "MDT ERR: Failed to create debug helper class.\n");
return;
}
g_mdt_dev = device_create(g_mdt_class, NULL, 0, NULL, "mdt_debug_dev");
if (IS_ERR(g_mdt_dev)) {
printk(KERN_ERR "MDT ERR: Failed to create debug helper device.\n");
return;
}
if (device_create_file(g_mdt_dev, &dev_attr_MDT_file) < 0) {
printk(KERN_ERR "MDT ERR: Failed to create debug helper device file.\n");
return;
}
}
#else
inline void MHL_log_event(int type, int code, int value)
{
printk(KERN_ERR "MDT %x %x %x\n", type, code, value);
}
#endif
#endif
| gpl-2.0 |
savell21/mangos | dep/ACE_wrappers/ace/Shared_Object.cpp | 76 | 1125 | // $Id: Shared_Object.cpp 80826 2008-03-04 14:51:23Z wotte $
#include "ace/Shared_Object.h"
#include "ace/Global_Macros.h"
#include "ace/config-all.h"
/* Provide the abstract base class used to access dynamic linking
facilities */
#if !defined (__ACE_INLINE__)
#include "ace/Shared_Object.inl"
#endif /* __ACE_INLINE__ */
ACE_RCSID (ace,
Shared_Object,
"$Id: Shared_Object.cpp 80826 2008-03-04 14:51:23Z wotte $")
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
// Initializes object when dynamic linking occurs.
int
ACE_Shared_Object::init (int, ACE_TCHAR *[])
{
ACE_TRACE ("ACE_Shared_Object::init");
return 0;
}
// Terminates object when dynamic unlinking occurs.
int
ACE_Shared_Object::fini (void)
{
ACE_TRACE ("ACE_Shared_Object::fini");
return 0;
}
// Returns information on active object.
int
ACE_Shared_Object::info (ACE_TCHAR **, size_t) const
{
ACE_TRACE ("ACE_Shared_Object::info");
return 0;
}
// Need to give a default implementation.
ACE_Shared_Object::~ACE_Shared_Object (void)
{
ACE_TRACE ("ACE_Shared_Object::~ACE_Shared_Object");
}
ACE_END_VERSIONED_NAMESPACE_DECL
| gpl-2.0 |
maliyu/SOM2416 | drivers/net/wan/hdlc_x25.c | 76 | 4945 | /*
* Generic HDLC support routines for Linux
* X.25 support
*
* Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
#include <linux/hdlc.h>
#include <net/x25device.h>
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
/* These functions are callbacks called by LAPB layer */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
{
struct sk_buff *skb;
unsigned char *ptr;
if ((skb = dev_alloc_skb(1)) == NULL) {
printk(KERN_ERR "%s: out of memory\n", dev->name);
return;
}
ptr = skb_put(skb, 1);
*ptr = code;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
}
static void x25_connected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, 1);
}
static void x25_disconnected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, 2);
}
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
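/*
 * Prepend the one-byte pseudo-header (0 = data) expected by the X.25
 * layer on received frames.
 */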
skb_push(skb, 1);
if (skb_cow(skb, 1))
return NET_RX_DROP;
ptr = skb->data;
*ptr = 0;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
}
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
static int x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
int result;
/* X.25 to LAPB */
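/*
 * The first byte is a pseudo-header from the X.25 layer:
 * 0 = data to transmit, 1 = connect request, 2 = disconnect request.
 */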
switch (skb->data[0]) {
case 0: /* Data to be transmitted */
skb_pull(skb, 1);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return 0;
case 1:
if ((result = lapb_connect_request(dev))!= LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
x25_connected(dev, 0);
else
printk(KERN_ERR "%s: LAPB connect request "
"failed, error code = %i\n",
dev->name, result);
}
break;
case 2:
if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
x25_disconnected(dev, 0);
else
printk(KERN_ERR "%s: LAPB disconnect request "
"failed, error code = %i\n",
dev->name, result);
}
break;
default: /* to be defined */
break;
}
dev_kfree_skb(skb);
return 0;
}
static int x25_open(struct net_device *dev)
{
struct lapb_register_struct cb;
int result;
cb.connect_confirmation = x25_connected;
cb.connect_indication = x25_connected;
cb.disconnect_confirmation = x25_disconnected;
cb.disconnect_indication = x25_disconnected;
cb.data_indication = x25_data_indication;
cb.data_transmit = x25_data_transmit;
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
return result;
return 0;
}
static void x25_close(struct net_device *dev)
{
lapb_unregister(dev);
}
static int x25_rx(struct sk_buff *skb)
{
struct hdlc_device_desc *desc = dev_to_desc(skb->dev);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
desc->stats.rx_dropped++;
return NET_RX_DROP;
}
if (lapb_data_received(skb->dev, skb) == LAPB_OK)
return NET_RX_SUCCESS;
desc->stats.rx_errors++;
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
static struct hdlc_proto proto = {
.open = x25_open,
.close = x25_close,
.ioctl = x25_ioctl,
.module = THIS_MODULE,
};
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
switch (ifr->ifr_settings.type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifr->ifr_settings.type = IF_PROTO_X25;
return 0; /* return protocol only, no settable parameters */
case IF_PROTO_X25:
if(!capable(CAP_NET_ADMIN))
return -EPERM;
if(dev->flags & IFF_UP)
return -EBUSY;
result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
if (result)
return result;
if ((result = attach_hdlc_protocol(dev, &proto,
x25_rx, 0)) != 0)
return result;
dev->hard_start_xmit = x25_xmit;
dev->type = ARPHRD_X25;
netif_dormant_off(dev);
return 0;
}
return -EINVAL;
}
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
yyu168/linux | tools/vm/slabinfo.c | 76 | 37111 | // SPDX-License-Identifier: GPL-2.0
/*
* Slabinfo: Tool to get reports about slabs
*
* (C) 2007 sgi, Christoph Lameter
* (C) 2011 Linux Foundation, Christoph Lameter
*
* Compile with:
*
* gcc -o slabinfo slabinfo.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <strings.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <getopt.h>
#include <regex.h>
#include <errno.h>
#define MAX_SLABS 500
#define MAX_ALIASES 500
#define MAX_NODES 1024
struct slabinfo {
char *name;
int alias;
int refs;
int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
int hwcache_align, object_size, objs_per_slab;
int sanity_checks, slab_size, store_user, trace;
int order, poison, reclaim_account, red_zone;
unsigned long partial, objects, slabs, objects_partial, objects_total;
unsigned long alloc_fastpath, alloc_slowpath;
unsigned long free_fastpath, free_slowpath;
unsigned long free_frozen, free_add_partial, free_remove_partial;
unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
unsigned long deactivate_to_head, deactivate_to_tail;
unsigned long deactivate_remote_frees, order_fallback;
unsigned long cmpxchg_double_cpu_fail, cmpxchg_double_fail;
unsigned long alloc_node_mismatch, deactivate_bypass;
unsigned long cpu_partial_alloc, cpu_partial_free;
int numa[MAX_NODES];
int numa_partial[MAX_NODES];
} slabinfo[MAX_SLABS];
struct aliasinfo {
char *name;
char *ref;
struct slabinfo *slab;
} aliasinfo[MAX_ALIASES];
int slabs;
int actual_slabs;
int aliases;
int alias_targets;
int highest_node;
char buffer[4096];
int show_empty;
int show_report;
int show_alias;
int show_slab;
int skip_zero = 1;
int show_numa;
int show_track;
int show_first_alias;
int validate;
int shrink;
int show_inverted;
int show_single_ref;
int show_totals;
int sort_size;
int sort_active;
int set_debug;
int show_ops;
int show_activity;
int output_lines = -1;
int sort_loss;
int extended_totals;
int show_bytes;
int unreclaim_only;
/* Debug options */
int sanity;
int redzone;
int poison;
int tracking;
int tracing;
int page_size;
regex_t pattern;
static void fatal(const char *x, ...)
{
va_list ap;
va_start(ap, x);
vfprintf(stderr, x, ap);
va_end(ap);
exit(EXIT_FAILURE);
}
static void usage(void)
{
printf("slabinfo 4/15/2011. (c) 2007 sgi/(c) 2011 Linux Foundation.\n\n"
"slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
"-a|--aliases Show aliases\n"
"-A|--activity Most active slabs first\n"
"-d<options>|--debug=<options> Set/Clear Debug options\n"
"-D|--display-active Switch line format to activity\n"
"-e|--empty Show empty slabs\n"
"-f|--first-alias Show first alias\n"
"-h|--help Show usage information\n"
"-i|--inverted Inverted list\n"
"-l|--slabs Show slabs\n"
"-n|--numa Show NUMA information\n"
"-o|--ops Show kmem_cache_ops\n"
"-s|--shrink Shrink slabs\n"
"-r|--report Detailed report on single slabs\n"
"-S|--Size Sort by size\n"
"-t|--tracking Show alloc/free information\n"
"-T|--Totals Show summary information\n"
"-v|--validate Validate slabs\n"
"-z|--zero Include empty slabs\n"
"-1|--1ref Single reference\n"
"-N|--lines=K Show the first K slabs\n"
"-L|--Loss Sort by loss\n"
"-X|--Xtotals Show extended summary information\n"
"-B|--Bytes Show size in bytes\n"
"-U|--Unreclaim Show unreclaimable slabs only\n"
"\nValid debug options (FZPUT may be combined)\n"
"a / A Switch on all debug options (=FZUP)\n"
"- Switch off all debug options\n"
"f / F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
"z / Z Redzoning\n"
"p / P Poisoning\n"
"u / U Tracking\n"
"t / T Tracing\n"
);
}
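/*
 * Example invocations (added for illustration; not part of the original
 * source).  Assuming the tool was built as described in the header comment:
 *
 *   ./slabinfo                  - one line per active slab cache
 *   ./slabinfo -T               - summary totals across all caches
 *   ./slabinfo -l -S -N10       - the ten largest caches, sorted by size
 *   ./slabinfo -r dentry        - detailed report for caches matching "dentry"
 */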
static unsigned long read_obj(const char *name)
{
FILE *f = fopen(name, "r");
if (!f)
buffer[0] = 0;
else {
if (!fgets(buffer, sizeof(buffer), f))
buffer[0] = 0;
fclose(f);
/* strip a trailing newline, if any */
if (strlen(buffer) && buffer[strlen(buffer) - 1] == '\n')
buffer[strlen(buffer) - 1] = 0;
}
return strlen(buffer);
}
/*
* Get the contents of an attribute
*/
static unsigned long get_obj(const char *name)
{
if (!read_obj(name))
return 0;
return atol(buffer);
}
static unsigned long get_obj_and_str(const char *name, char **x)
{
unsigned long result = 0;
char *p;
*x = NULL;
if (!read_obj(name))
return 0;
result = strtoul(buffer, &p, 10);
while (*p == ' ')
p++;
if (*p)
*x = strdup(p);
return result;
}
static void set_obj(struct slabinfo *s, const char *name, int n)
{
char x[100];
FILE *f;
snprintf(x, 100, "%s/%s", s->name, name);
f = fopen(x, "w");
if (!f)
fatal("Cannot write to %s\n", x);
fprintf(f, "%d\n", n);
fclose(f);
}
static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
{
char x[100];
FILE *f;
size_t l;
snprintf(x, 100, "%s/%s", s->name, name);
f = fopen(x, "r");
if (!f) {
buffer[0] = 0;
l = 0;
} else {
l = fread(buffer, 1, sizeof(buffer) - 1, f);
buffer[l] = 0;
fclose(f);
}
return l;
}
/*
* Put a size string together
*/
static int store_size(char *buffer, unsigned long value)
{
unsigned long divisor = 1;
char trailer = 0;
int n;
if (!show_bytes) {
if (value > 1000000000UL) {
divisor = 100000000UL;
trailer = 'G';
} else if (value > 1000000UL) {
divisor = 100000UL;
trailer = 'M';
} else if (value > 1000UL) {
divisor = 100;
trailer = 'K';
}
}
value /= divisor;
n = sprintf(buffer, "%ld",value);
if (trailer) {
buffer[n] = trailer;
n++;
buffer[n] = 0;
}
if (divisor != 1) {
memmove(buffer + n - 2, buffer + n - 3, 4);
buffer[n-2] = '.';
n++;
}
return n;
}
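/*
 * Worked example (illustrative, not from the original source): with
 * show_bytes == 0, store_size(buf, 1536) selects divisor 100 and trailer
 * 'K', prints "15", appends the trailer and then inserts the decimal
 * point, yielding "1.5K".  With -B/--Bytes the raw "1536" is emitted.
 */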
static void decode_numa_list(int *numa, char *t)
{
int node;
int nr;
memset(numa, 0, MAX_NODES * sizeof(int));
if (!t)
return;
while (*t == 'N') {
t++;
node = strtoul(t, &t, 10);
if (*t == '=') {
t++;
nr = strtoul(t, &t, 10);
numa[node] = nr;
if (node > highest_node)
highest_node = node;
}
while (*t == ' ')
t++;
}
}
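/*
 * Example input (illustrative): the per-node tail of a SLUB sysfs
 * attribute such as "slabs" or "partial" looks like "N0=52 N1=48".
 * Fed to decode_numa_list(), it sets numa[0] = 52 and numa[1] = 48 and
 * raises highest_node to 1.
 */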
static void slab_validate(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
set_obj(s, "validate", 1);
}
static void slab_shrink(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
set_obj(s, "shrink", 1);
}
int line = 0;
static void first_line(void)
{
if (show_activity)
printf("Name Objects Alloc Free"
" %%Fast Fallb O CmpX UL\n");
else
printf("Name Objects Objsize %s "
"Slabs/Part/Cpu O/S O %%Fr %%Ef Flg\n",
sort_loss ? " Loss" : "Space");
}
/*
* Find a readable alias for a slab: prefer a "kmall*" (kmalloc) alias,
* otherwise keep the longest alias name found
*/
static struct aliasinfo *find_one_alias(struct slabinfo *find)
{
struct aliasinfo *a;
struct aliasinfo *best = NULL;
for(a = aliasinfo;a < aliasinfo + aliases; a++) {
if (a->slab == find &&
(!best || strlen(best->name) < strlen(a->name))) {
best = a;
if (strncmp(a->name,"kmall", 5) == 0)
return best;
}
}
return best;
}
static unsigned long slab_size(struct slabinfo *s)
{
return s->slabs * (page_size << s->order);
}
static unsigned long slab_activity(struct slabinfo *s)
{
return s->alloc_fastpath + s->free_fastpath +
s->alloc_slowpath + s->free_slowpath;
}
static unsigned long slab_waste(struct slabinfo *s)
{
return slab_size(s) - s->objects * s->object_size;
}
static void slab_numa(struct slabinfo *s, int mode)
{
int node;
if (strcmp(s->name, "*") == 0)
return;
if (!highest_node) {
printf("\n%s: No NUMA information available.\n", s->name);
return;
}
if (skip_zero && !s->slabs)
return;
if (!line) {
printf("\n%-21s:", mode ? "NUMA nodes" : "Slab");
for(node = 0; node <= highest_node; node++)
printf(" %4d", node);
printf("\n----------------------");
for(node = 0; node <= highest_node; node++)
printf("-----");
printf("\n");
}
printf("%-21s ", mode ? "All slabs" : s->name);
for(node = 0; node <= highest_node; node++) {
char b[20];
store_size(b, s->numa[node]);
printf(" %4s", b);
}
printf("\n");
if (mode) {
printf("%-21s ", "Partial slabs");
for(node = 0; node <= highest_node; node++) {
char b[20];
store_size(b, s->numa_partial[node]);
printf(" %4s", b);
}
printf("\n");
}
line++;
}
static void show_tracking(struct slabinfo *s)
{
printf("\n%s: Kernel object allocation\n", s->name);
printf("-----------------------------------------------------------------------\n");
if (read_slab_obj(s, "alloc_calls"))
printf("%s", buffer);
else
printf("No Data\n");
printf("\n%s: Kernel object freeing\n", s->name);
printf("------------------------------------------------------------------------\n");
if (read_slab_obj(s, "free_calls"))
printf("%s", buffer);
else
printf("No Data\n");
}
static void ops(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
if (read_slab_obj(s, "ops")) {
printf("\n%s: kmem_cache operations\n", s->name);
printf("--------------------------------------------\n");
printf("%s", buffer);
} else
printf("\n%s has no kmem_cache operations\n", s->name);
}
static const char *onoff(int x)
{
if (x)
return "On ";
return "Off";
}
static void slab_stats(struct slabinfo *s)
{
unsigned long total_alloc;
unsigned long total_free;
unsigned long total;
if (!s->alloc_slab)
return;
total_alloc = s->alloc_fastpath + s->alloc_slowpath;
total_free = s->free_fastpath + s->free_slowpath;
if (!total_alloc)
return;
printf("\n");
printf("Slab Perf Counter Alloc Free %%Al %%Fr\n");
printf("--------------------------------------------------\n");
printf("Fastpath %8lu %8lu %3lu %3lu\n",
s->alloc_fastpath, s->free_fastpath,
s->alloc_fastpath * 100 / total_alloc,
total_free ? s->free_fastpath * 100 / total_free : 0);
printf("Slowpath %8lu %8lu %3lu %3lu\n",
total_alloc - s->alloc_fastpath, s->free_slowpath,
(total_alloc - s->alloc_fastpath) * 100 / total_alloc,
total_free ? s->free_slowpath * 100 / total_free : 0);
printf("Page Alloc %8lu %8lu %3lu %3lu\n",
s->alloc_slab, s->free_slab,
s->alloc_slab * 100 / total_alloc,
total_free ? s->free_slab * 100 / total_free : 0);
printf("Add partial %8lu %8lu %3lu %3lu\n",
s->deactivate_to_head + s->deactivate_to_tail,
s->free_add_partial,
(s->deactivate_to_head + s->deactivate_to_tail) * 100 / total_alloc,
total_free ? s->free_add_partial * 100 / total_free : 0);
printf("Remove partial %8lu %8lu %3lu %3lu\n",
s->alloc_from_partial, s->free_remove_partial,
s->alloc_from_partial * 100 / total_alloc,
total_free ? s->free_remove_partial * 100 / total_free : 0);
printf("Cpu partial list %8lu %8lu %3lu %3lu\n",
s->cpu_partial_alloc, s->cpu_partial_free,
s->cpu_partial_alloc * 100 / total_alloc,
total_free ? s->cpu_partial_free * 100 / total_free : 0);
printf("RemoteObj/SlabFrozen %8lu %8lu %3lu %3lu\n",
s->deactivate_remote_frees, s->free_frozen,
s->deactivate_remote_frees * 100 / total_alloc,
total_free ? s->free_frozen * 100 / total_free : 0);
printf("Total %8lu %8lu\n\n", total_alloc, total_free);
if (s->cpuslab_flush)
printf("Flushes %8lu\n", s->cpuslab_flush);
total = s->deactivate_full + s->deactivate_empty +
s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
if (total) {
printf("\nSlab Deactivation Occurrences %%\n");
printf("-------------------------------------------------\n");
printf("Slab full %7lu %3lu%%\n",
s->deactivate_full, (s->deactivate_full * 100) / total);
printf("Slab empty %7lu %3lu%%\n",
s->deactivate_empty, (s->deactivate_empty * 100) / total);
printf("Moved to head of partial list %7lu %3lu%%\n",
s->deactivate_to_head, (s->deactivate_to_head * 100) / total);
printf("Moved to tail of partial list %7lu %3lu%%\n",
s->deactivate_to_tail, (s->deactivate_to_tail * 100) / total);
printf("Deactivation bypass %7lu %3lu%%\n",
s->deactivate_bypass, (s->deactivate_bypass * 100) / total);
printf("Refilled from foreign frees %7lu %3lu%%\n",
s->alloc_refill, (s->alloc_refill * 100) / total);
printf("Node mismatch %7lu %3lu%%\n",
s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total);
}
if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) {
printf("\nCmpxchg_double Looping\n------------------------\n");
printf("Locked Cmpxchg Double redos %lu\nUnlocked Cmpxchg Double redos %lu\n",
s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail);
}
}
static void report(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
printf("\nSlabcache: %-15s Aliases: %2d Order : %2d Objects: %lu\n",
s->name, s->aliases, s->order, s->objects);
if (s->hwcache_align)
printf("** Hardware cacheline aligned\n");
if (s->cache_dma)
printf("** Memory is allocated in a special DMA zone\n");
if (s->destroy_by_rcu)
printf("** Slabs are destroyed via RCU\n");
if (s->reclaim_account)
printf("** Reclaim accounting active\n");
printf("\nSizes (bytes) Slabs Debug Memory\n");
printf("------------------------------------------------------------------------\n");
printf("Object : %7d Total : %7ld Sanity Checks : %s Total: %7ld\n",
s->object_size, s->slabs, onoff(s->sanity_checks),
s->slabs * (page_size << s->order));
printf("SlabObj: %7d Full : %7ld Redzoning : %s Used : %7ld\n",
s->slab_size, s->slabs - s->partial - s->cpu_slabs,
onoff(s->red_zone), s->objects * s->object_size);
printf("SlabSiz: %7d Partial: %7ld Poisoning : %s Loss : %7ld\n",
page_size << s->order, s->partial, onoff(s->poison),
s->slabs * (page_size << s->order) - s->objects * s->object_size);
printf("Loss : %7d CpuSlab: %7d Tracking : %s Lalig: %7ld\n",
s->slab_size - s->object_size, s->cpu_slabs, onoff(s->store_user),
(s->slab_size - s->object_size) * s->objects);
printf("Align : %7d Objects: %7d Tracing : %s Lpadd: %7ld\n",
s->align, s->objs_per_slab, onoff(s->trace),
((page_size << s->order) - s->objs_per_slab * s->slab_size) *
s->slabs);
ops(s);
show_tracking(s);
slab_numa(s, 1);
slab_stats(s);
}
static void slabcache(struct slabinfo *s)
{
char size_str[20];
char dist_str[40];
char flags[20];
char *p = flags;
if (strcmp(s->name, "*") == 0)
return;
if (unreclaim_only && s->reclaim_account)
return;
if (actual_slabs == 1) {
report(s);
return;
}
if (skip_zero && !show_empty && !s->slabs)
return;
if (show_empty && s->slabs)
return;
if (sort_loss == 0)
store_size(size_str, slab_size(s));
else
store_size(size_str, slab_waste(s));
snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
s->partial, s->cpu_slabs);
if (!line++)
first_line();
if (s->aliases)
*p++ = '*';
if (s->cache_dma)
*p++ = 'd';
if (s->hwcache_align)
*p++ = 'A';
if (s->poison)
*p++ = 'P';
if (s->reclaim_account)
*p++ = 'a';
if (s->red_zone)
*p++ = 'Z';
if (s->sanity_checks)
*p++ = 'F';
if (s->store_user)
*p++ = 'U';
if (s->trace)
*p++ = 'T';
*p = 0;
if (show_activity) {
unsigned long total_alloc;
unsigned long total_free;
total_alloc = s->alloc_fastpath + s->alloc_slowpath;
total_free = s->free_fastpath + s->free_slowpath;
printf("%-21s %8ld %10ld %10ld %3ld %3ld %5ld %1d %4ld %4ld\n",
s->name, s->objects,
total_alloc, total_free,
total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
total_free ? (s->free_fastpath * 100 / total_free) : 0,
s->order_fallback, s->order, s->cmpxchg_double_fail,
s->cmpxchg_double_cpu_fail);
} else {
printf("%-21s %8ld %7d %15s %14s %4d %1d %3ld %3ld %s\n",
s->name, s->objects, s->object_size, size_str, dist_str,
s->objs_per_slab, s->order,
s->slabs ? (s->partial * 100) / s->slabs : 100,
s->slabs ? (s->objects * s->object_size * 100) /
(s->slabs * (page_size << s->order)) : 100,
flags);
}
}
/*
* Analyze debug options. Return false if something is amiss.
*/
static int debug_opt_scan(char *opt)
{
if (!opt || !opt[0] || strcmp(opt, "-") == 0)
return 1;
if (strcasecmp(opt, "a") == 0) {
sanity = 1;
poison = 1;
redzone = 1;
tracking = 1;
return 1;
}
for ( ; *opt; opt++)
switch (*opt) {
case 'F' : case 'f':
if (sanity)
return 0;
sanity = 1;
break;
case 'P' : case 'p':
if (poison)
return 0;
poison = 1;
break;
case 'Z' : case 'z':
if (redzone)
return 0;
redzone = 1;
break;
case 'U' : case 'u':
if (tracking)
return 0;
tracking = 1;
break;
case 'T' : case 't':
if (tracing)
return 0;
tracing = 1;
break;
default:
return 0;
}
return 1;
}
static int slab_empty(struct slabinfo *s)
{
if (s->objects > 0)
return 0;
/*
* We may still have slabs even if there are no objects. Shrinking will
* remove them.
*/
if (s->slabs != 0)
set_obj(s, "shrink", 1);
return 1;
}
static void slab_debug(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
if (sanity && !s->sanity_checks) {
set_obj(s, "sanity", 1);
}
if (!sanity && s->sanity_checks) {
if (slab_empty(s))
set_obj(s, "sanity", 0);
else
fprintf(stderr, "%s not empty cannot disable sanity checks\n", s->name);
}
if (redzone && !s->red_zone) {
if (slab_empty(s))
set_obj(s, "red_zone", 1);
else
fprintf(stderr, "%s not empty cannot enable redzoning\n", s->name);
}
if (!redzone && s->red_zone) {
if (slab_empty(s))
set_obj(s, "red_zone", 0);
else
fprintf(stderr, "%s not empty cannot disable redzoning\n", s->name);
}
if (poison && !s->poison) {
if (slab_empty(s))
set_obj(s, "poison", 1);
else
fprintf(stderr, "%s not empty cannot enable poisoning\n", s->name);
}
if (!poison && s->poison) {
if (slab_empty(s))
set_obj(s, "poison", 0);
else
fprintf(stderr, "%s not empty cannot disable poisoning\n", s->name);
}
if (tracking && !s->store_user) {
if (slab_empty(s))
set_obj(s, "store_user", 1);
else
fprintf(stderr, "%s not empty cannot enable tracking\n", s->name);
}
if (!tracking && s->store_user) {
if (slab_empty(s))
set_obj(s, "store_user", 0);
else
fprintf(stderr, "%s not empty cannot disable tracking\n", s->name);
}
if (tracing && !s->trace) {
if (slabs == 1)
set_obj(s, "trace", 1);
else
fprintf(stderr, "%s can only enable trace for one slab at a time\n", s->name);
}
if (!tracing && s->trace)
set_obj(s, "trace", 0);
}
static void totals(void)
{
struct slabinfo *s;
int used_slabs = 0;
char b1[20], b2[20], b3[20], b4[20];
unsigned long long max = 1ULL << 63;
/* Object size */
unsigned long long min_objsize = max, max_objsize = 0, avg_objsize;
/* Number of partial slabs in a slabcache */
unsigned long long min_partial = max, max_partial = 0,
avg_partial, total_partial = 0;
/* Number of slabs in a slab cache */
unsigned long long min_slabs = max, max_slabs = 0,
avg_slabs, total_slabs = 0;
/* Size of the whole slab */
unsigned long long min_size = max, max_size = 0,
avg_size, total_size = 0;
/* Bytes used for object storage in a slab */
unsigned long long min_used = max, max_used = 0,
avg_used, total_used = 0;
/* Waste: Bytes used for alignment and padding */
unsigned long long min_waste = max, max_waste = 0,
avg_waste, total_waste = 0;
/* Number of objects in a slab */
unsigned long long min_objects = max, max_objects = 0,
avg_objects, total_objects = 0;
/* Waste per object */
unsigned long long min_objwaste = max,
max_objwaste = 0, avg_objwaste,
total_objwaste = 0;
/* Memory per object */
unsigned long long min_memobj = max,
max_memobj = 0, avg_memobj,
total_objsize = 0;
/* Percentage of partial slabs per slab */
unsigned long min_ppart = 100, max_ppart = 0,
avg_ppart, total_ppart = 0;
/* Number of objects in partial slabs */
unsigned long min_partobj = max, max_partobj = 0,
avg_partobj, total_partobj = 0;
/* Percentage of partial objects of all objects in a slab */
unsigned long min_ppartobj = 100, max_ppartobj = 0,
avg_ppartobj, total_ppartobj = 0;
for (s = slabinfo; s < slabinfo + slabs; s++) {
unsigned long long size;
unsigned long used;
unsigned long long wasted;
unsigned long long objwaste;
unsigned long percentage_partial_slabs;
unsigned long percentage_partial_objs;
if (!s->slabs || !s->objects)
continue;
used_slabs++;
size = slab_size(s);
used = s->objects * s->object_size;
wasted = size - used;
objwaste = s->slab_size - s->object_size;
percentage_partial_slabs = s->partial * 100 / s->slabs;
if (percentage_partial_slabs > 100)
percentage_partial_slabs = 100;
percentage_partial_objs = s->objects_partial * 100
/ s->objects;
if (percentage_partial_objs > 100)
percentage_partial_objs = 100;
if (s->object_size < min_objsize)
min_objsize = s->object_size;
if (s->partial < min_partial)
min_partial = s->partial;
if (s->slabs < min_slabs)
min_slabs = s->slabs;
if (size < min_size)
min_size = size;
if (wasted < min_waste)
min_waste = wasted;
if (objwaste < min_objwaste)
min_objwaste = objwaste;
if (s->objects < min_objects)
min_objects = s->objects;
if (used < min_used)
min_used = used;
if (s->objects_partial < min_partobj)
min_partobj = s->objects_partial;
if (percentage_partial_slabs < min_ppart)
min_ppart = percentage_partial_slabs;
if (percentage_partial_objs < min_ppartobj)
min_ppartobj = percentage_partial_objs;
if (s->slab_size < min_memobj)
min_memobj = s->slab_size;
if (s->object_size > max_objsize)
max_objsize = s->object_size;
if (s->partial > max_partial)
max_partial = s->partial;
if (s->slabs > max_slabs)
max_slabs = s->slabs;
if (size > max_size)
max_size = size;
if (wasted > max_waste)
max_waste = wasted;
if (objwaste > max_objwaste)
max_objwaste = objwaste;
if (s->objects > max_objects)
max_objects = s->objects;
if (used > max_used)
max_used = used;
if (s->objects_partial > max_partobj)
max_partobj = s->objects_partial;
if (percentage_partial_slabs > max_ppart)
max_ppart = percentage_partial_slabs;
if (percentage_partial_objs > max_ppartobj)
max_ppartobj = percentage_partial_objs;
if (s->slab_size > max_memobj)
max_memobj = s->slab_size;
total_partial += s->partial;
total_slabs += s->slabs;
total_size += size;
total_waste += wasted;
total_objects += s->objects;
total_used += used;
total_partobj += s->objects_partial;
total_ppart += percentage_partial_slabs;
total_ppartobj += percentage_partial_objs;
total_objwaste += s->objects * objwaste;
total_objsize += s->objects * s->slab_size;
}
if (!total_objects) {
printf("No objects\n");
return;
}
if (!used_slabs) {
printf("No slabs\n");
return;
}
/* Per slab averages */
avg_partial = total_partial / used_slabs;
avg_slabs = total_slabs / used_slabs;
avg_size = total_size / used_slabs;
avg_waste = total_waste / used_slabs;
avg_objects = total_objects / used_slabs;
avg_used = total_used / used_slabs;
avg_partobj = total_partobj / used_slabs;
avg_ppart = total_ppart / used_slabs;
avg_ppartobj = total_ppartobj / used_slabs;
/* Per object object sizes */
avg_objsize = total_used / total_objects;
avg_objwaste = total_objwaste / total_objects;
avg_partobj = total_partobj * 100 / total_objects;
avg_memobj = total_objsize / total_objects;
printf("Slabcache Totals\n");
printf("----------------\n");
printf("Slabcaches : %15d Aliases : %11d->%-3d Active: %3d\n",
slabs, aliases, alias_targets, used_slabs);
store_size(b1, total_size);store_size(b2, total_waste);
store_size(b3, total_waste * 100 / total_used);
printf("Memory used: %15s # Loss : %15s MRatio:%6s%%\n", b1, b2, b3);
store_size(b1, total_objects);store_size(b2, total_partobj);
store_size(b3, total_partobj * 100 / total_objects);
printf("# Objects : %15s # PartObj: %15s ORatio:%6s%%\n", b1, b2, b3);
printf("\n");
printf("Per Cache Average "
"Min Max Total\n");
printf("---------------------------------------"
"-------------------------------------\n");
store_size(b1, avg_objects);store_size(b2, min_objects);
store_size(b3, max_objects);store_size(b4, total_objects);
printf("#Objects %15s %15s %15s %15s\n",
b1, b2, b3, b4);
store_size(b1, avg_slabs);store_size(b2, min_slabs);
store_size(b3, max_slabs);store_size(b4, total_slabs);
printf("#Slabs %15s %15s %15s %15s\n",
b1, b2, b3, b4);
store_size(b1, avg_partial);store_size(b2, min_partial);
store_size(b3, max_partial);store_size(b4, total_partial);
printf("#PartSlab %15s %15s %15s %15s\n",
b1, b2, b3, b4);
store_size(b1, avg_ppart);store_size(b2, min_ppart);
store_size(b3, max_ppart);
store_size(b4, total_partial * 100 / total_slabs);
printf("%%PartSlab%15s%% %15s%% %15s%% %15s%%\n",
b1, b2, b3, b4);
store_size(b1, avg_partobj);store_size(b2, min_partobj);
store_size(b3, max_partobj);
store_size(b4, total_partobj);
printf("PartObjs %15s %15s %15s %15s\n",
b1, b2, b3, b4);
store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj);
store_size(b3, max_ppartobj);
store_size(b4, total_partobj * 100 / total_objects);
printf("%% PartObj%15s%% %15s%% %15s%% %15s%%\n",
b1, b2, b3, b4);
store_size(b1, avg_size);store_size(b2, min_size);
store_size(b3, max_size);store_size(b4, total_size);
printf("Memory %15s %15s %15s %15s\n",
b1, b2, b3, b4);
store_size(b1, avg_used);store_size(b2, min_used);
store_size(b3, max_used);store_size(b4, total_used);
printf("Used %15s %15s %15s %15s\n",
b1, b2, b3, b4);
store_size(b1, avg_waste);store_size(b2, min_waste);
store_size(b3, max_waste);store_size(b4, total_waste);
printf("Loss %15s %15s %15s %15s\n",
b1, b2, b3, b4);
printf("\n");
printf("Per Object Average "
"Min Max\n");
printf("---------------------------------------"
"--------------------\n");
store_size(b1, avg_memobj);store_size(b2, min_memobj);
store_size(b3, max_memobj);
printf("Memory %15s %15s %15s\n",
b1, b2, b3);
store_size(b1, avg_objsize);store_size(b2, min_objsize);
store_size(b3, max_objsize);
printf("User %15s %15s %15s\n",
b1, b2, b3);
store_size(b1, avg_objwaste);store_size(b2, min_objwaste);
store_size(b3, max_objwaste);
printf("Loss %15s %15s %15s\n",
b1, b2, b3);
}
static void sort_slabs(void)
{
struct slabinfo *s1,*s2;
for (s1 = slabinfo; s1 < slabinfo + slabs; s1++) {
for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
int result;
if (sort_size)
result = slab_size(s1) < slab_size(s2);
else if (sort_active)
result = slab_activity(s1) < slab_activity(s2);
else if (sort_loss)
result = slab_waste(s1) < slab_waste(s2);
else
result = strcasecmp(s1->name, s2->name);
if (show_inverted)
result = -result;
if (result > 0) {
struct slabinfo t;
memcpy(&t, s1, sizeof(struct slabinfo));
memcpy(s1, s2, sizeof(struct slabinfo));
memcpy(s2, &t, sizeof(struct slabinfo));
}
}
}
}
static void sort_aliases(void)
{
struct aliasinfo *a1,*a2;
for (a1 = aliasinfo; a1 < aliasinfo + aliases; a1++) {
for (a2 = a1 + 1; a2 < aliasinfo + aliases; a2++) {
char *n1, *n2;
n1 = a1->name;
n2 = a2->name;
if (show_alias && !show_inverted) {
n1 = a1->ref;
n2 = a2->ref;
}
if (strcasecmp(n1, n2) > 0) {
struct aliasinfo t;
memcpy(&t, a1, sizeof(struct aliasinfo));
memcpy(a1, a2, sizeof(struct aliasinfo));
memcpy(a2, &t, sizeof(struct aliasinfo));
}
}
}
}
static void link_slabs(void)
{
struct aliasinfo *a;
struct slabinfo *s;
for (a = aliasinfo; a < aliasinfo + aliases; a++) {
for (s = slabinfo; s < slabinfo + slabs; s++)
if (strcmp(a->ref, s->name) == 0) {
a->slab = s;
s->refs++;
break;
}
if (s == slabinfo + slabs)
fatal("Unresolved alias %s\n", a->ref);
}
}
static void alias(void)
{
struct aliasinfo *a;
char *active = NULL;
sort_aliases();
link_slabs();
for(a = aliasinfo; a < aliasinfo + aliases; a++) {
if (!show_single_ref && a->slab->refs == 1)
continue;
if (!show_inverted) {
if (active) {
if (strcmp(a->slab->name, active) == 0) {
printf(" %s", a->name);
continue;
}
}
printf("\n%-12s <- %s", a->slab->name, a->name);
active = a->slab->name;
}
else
printf("%-15s -> %s\n", a->name, a->slab->name);
}
if (active)
printf("\n");
}
static void rename_slabs(void)
{
struct slabinfo *s;
struct aliasinfo *a;
for (s = slabinfo; s < slabinfo + slabs; s++) {
if (*s->name != ':')
continue;
if (s->refs > 1 && !show_first_alias)
continue;
a = find_one_alias(s);
if (a)
s->name = a->name;
else {
s->name = "*";
actual_slabs--;
}
}
}
static int slab_mismatch(char *slab)
{
return regexec(&pattern, slab, 0, NULL, 0);
}
static void read_slab_dir(void)
{
DIR *dir;
struct dirent *de;
struct slabinfo *slab = slabinfo;
struct aliasinfo *alias = aliasinfo;
char *p;
char *t;
int count;
if (chdir("/sys/kernel/slab") && chdir("/sys/slab"))
fatal("SYSFS support for SLUB not active\n");
dir = opendir(".");
while ((de = readdir(dir))) {
if (de->d_name[0] == '.' ||
(de->d_name[0] != ':' && slab_mismatch(de->d_name)))
continue;
switch (de->d_type) {
case DT_LNK:
alias->name = strdup(de->d_name);
count = readlink(de->d_name, buffer, sizeof(buffer)-1);
if (count < 0)
fatal("Cannot read symlink %s\n", de->d_name);
buffer[count] = 0;
p = buffer + count;
while (p > buffer && p[-1] != '/')
p--;
alias->ref = strdup(p);
alias++;
break;
case DT_DIR:
if (chdir(de->d_name))
fatal("Unable to access slab %s\n", de->d_name);
slab->name = strdup(de->d_name);
slab->alias = 0;
slab->refs = 0;
slab->aliases = get_obj("aliases");
slab->align = get_obj("align");
slab->cache_dma = get_obj("cache_dma");
slab->cpu_slabs = get_obj("cpu_slabs");
slab->destroy_by_rcu = get_obj("destroy_by_rcu");
slab->hwcache_align = get_obj("hwcache_align");
slab->object_size = get_obj("object_size");
slab->objects = get_obj("objects");
slab->objects_partial = get_obj("objects_partial");
slab->objects_total = get_obj("objects_total");
slab->objs_per_slab = get_obj("objs_per_slab");
slab->order = get_obj("order");
slab->partial = get_obj("partial");
slab->partial = get_obj_and_str("partial", &t);
decode_numa_list(slab->numa_partial, t);
free(t);
slab->poison = get_obj("poison");
slab->reclaim_account = get_obj("reclaim_account");
slab->red_zone = get_obj("red_zone");
slab->sanity_checks = get_obj("sanity_checks");
slab->slab_size = get_obj("slab_size");
slab->slabs = get_obj_and_str("slabs", &t);
decode_numa_list(slab->numa, t);
free(t);
slab->store_user = get_obj("store_user");
slab->trace = get_obj("trace");
slab->alloc_fastpath = get_obj("alloc_fastpath");
slab->alloc_slowpath = get_obj("alloc_slowpath");
slab->free_fastpath = get_obj("free_fastpath");
slab->free_slowpath = get_obj("free_slowpath");
slab->free_frozen= get_obj("free_frozen");
slab->free_add_partial = get_obj("free_add_partial");
slab->free_remove_partial = get_obj("free_remove_partial");
slab->alloc_from_partial = get_obj("alloc_from_partial");
slab->alloc_slab = get_obj("alloc_slab");
slab->alloc_refill = get_obj("alloc_refill");
slab->free_slab = get_obj("free_slab");
slab->cpuslab_flush = get_obj("cpuslab_flush");
slab->deactivate_full = get_obj("deactivate_full");
slab->deactivate_empty = get_obj("deactivate_empty");
slab->deactivate_to_head = get_obj("deactivate_to_head");
slab->deactivate_to_tail = get_obj("deactivate_to_tail");
slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
slab->order_fallback = get_obj("order_fallback");
slab->cmpxchg_double_cpu_fail = get_obj("cmpxchg_double_cpu_fail");
slab->cmpxchg_double_fail = get_obj("cmpxchg_double_fail");
slab->cpu_partial_alloc = get_obj("cpu_partial_alloc");
slab->cpu_partial_free = get_obj("cpu_partial_free");
slab->alloc_node_mismatch = get_obj("alloc_node_mismatch");
slab->deactivate_bypass = get_obj("deactivate_bypass");
chdir("..");
if (slab->name[0] == ':')
alias_targets++;
slab++;
break;
default :
fatal("Unknown file type %lx\n", de->d_type);
}
}
closedir(dir);
slabs = slab - slabinfo;
actual_slabs = slabs;
aliases = alias - aliasinfo;
if (slabs > MAX_SLABS)
fatal("Too many slabs\n");
if (aliases > MAX_ALIASES)
fatal("Too many aliases\n");
}
static void output_slabs(void)
{
struct slabinfo *slab;
int lines = output_lines;
for (slab = slabinfo; (slab < slabinfo + slabs) &&
lines != 0; slab++) {
if (slab->alias)
continue;
if (lines != -1)
lines--;
if (show_numa)
slab_numa(slab, 0);
else if (show_track)
show_tracking(slab);
else if (validate)
slab_validate(slab);
else if (shrink)
slab_shrink(slab);
else if (set_debug)
slab_debug(slab);
else if (show_ops)
ops(slab);
else if (show_slab)
slabcache(slab);
else if (show_report)
report(slab);
}
}
static void xtotals(void)
{
totals();
link_slabs();
rename_slabs();
printf("\nSlabs sorted by size\n");
printf("--------------------\n");
sort_loss = 0;
sort_size = 1;
sort_slabs();
output_slabs();
printf("\nSlabs sorted by loss\n");
printf("--------------------\n");
line = 0;
sort_loss = 1;
sort_size = 0;
sort_slabs();
output_slabs();
printf("\n");
}
struct option opts[] = {
{ "aliases", no_argument, NULL, 'a' },
{ "activity", no_argument, NULL, 'A' },
{ "debug", optional_argument, NULL, 'd' },
{ "display-activity", no_argument, NULL, 'D' },
{ "empty", no_argument, NULL, 'e' },
{ "first-alias", no_argument, NULL, 'f' },
{ "help", no_argument, NULL, 'h' },
{ "inverted", no_argument, NULL, 'i'},
{ "slabs", no_argument, NULL, 'l' },
{ "numa", no_argument, NULL, 'n' },
{ "ops", no_argument, NULL, 'o' },
{ "shrink", no_argument, NULL, 's' },
{ "report", no_argument, NULL, 'r' },
{ "Size", no_argument, NULL, 'S'},
{ "tracking", no_argument, NULL, 't'},
{ "Totals", no_argument, NULL, 'T'},
{ "validate", no_argument, NULL, 'v' },
{ "zero", no_argument, NULL, 'z' },
{ "1ref", no_argument, NULL, '1'},
{ "lines", required_argument, NULL, 'N'},
{ "Loss", no_argument, NULL, 'L'},
{ "Xtotals", no_argument, NULL, 'X'},
{ "Bytes", no_argument, NULL, 'B'},
{ "Unreclaim", no_argument, NULL, 'U'},
{ NULL, 0, NULL, 0 }
};
int main(int argc, char *argv[])
{
int c;
int err;
char *pattern_source;
page_size = getpagesize();
while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXBU",
opts, NULL)) != -1)
switch (c) {
case '1':
show_single_ref = 1;
break;
case 'a':
show_alias = 1;
break;
case 'A':
sort_active = 1;
break;
case 'd':
set_debug = 1;
if (!debug_opt_scan(optarg))
fatal("Invalid debug option '%s'\n", optarg);
break;
case 'D':
show_activity = 1;
break;
case 'e':
show_empty = 1;
break;
case 'f':
show_first_alias = 1;
break;
case 'h':
usage();
return 0;
case 'i':
show_inverted = 1;
break;
case 'n':
show_numa = 1;
break;
case 'o':
show_ops = 1;
break;
case 'r':
show_report = 1;
break;
case 's':
shrink = 1;
break;
case 'l':
show_slab = 1;
break;
case 't':
show_track = 1;
break;
case 'v':
validate = 1;
break;
case 'z':
skip_zero = 0;
break;
case 'T':
show_totals = 1;
break;
case 'S':
sort_size = 1;
break;
case 'N':
if (optarg) {
output_lines = atoi(optarg);
if (output_lines < 1)
output_lines = 1;
}
break;
case 'L':
sort_loss = 1;
break;
case 'X':
if (output_lines == -1)
output_lines = 1;
extended_totals = 1;
show_bytes = 1;
break;
case 'B':
show_bytes = 1;
break;
case 'U':
unreclaim_only = 1;
break;
default:
fatal("%s: Invalid option '%c'\n", argv[0], optopt);
}
if (!show_slab && !show_alias && !show_track && !show_report
&& !validate && !shrink && !set_debug && !show_ops)
show_slab = 1;
if (argc > optind)
pattern_source = argv[optind];
else
pattern_source = ".*";
err = regcomp(&pattern, pattern_source, REG_ICASE|REG_NOSUB);
if (err)
fatal("%s: Invalid pattern '%s' code %d\n",
argv[0], pattern_source, err);
read_slab_dir();
if (show_alias) {
alias();
} else if (extended_totals) {
xtotals();
} else if (show_totals) {
totals();
} else {
link_slabs();
rename_slabs();
sort_slabs();
output_slabs();
}
return 0;
}
| gpl-2.0 |
MassStash/htc_pme_kernel_sense_6.0 | drivers/char/xillybus/xillybus_of.c | 332 | 4459 | /*
* linux/drivers/misc/xillybus_of.c
*
* Copyright 2011 Xillybus Ltd, http://xillybus.com
*
* Driver for the Xillybus FPGA/host framework using Open Firmware.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/err.h>
#include "xillybus.h"
MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_VERSION("1.06");
MODULE_ALIAS("xillybus_of");
MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillybus_of";
/* Match table for of_platform binding */
static struct of_device_id xillybus_of_match[] = {
{ .compatible = "xillybus,xillybus-1.00.a", },
{ .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
{}
};
MODULE_DEVICE_TABLE(of, xillybus_of_match);
static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep,
dma_addr_t dma_handle,
size_t size,
int direction)
{
dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction);
}
static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
dma_addr_t dma_handle,
size_t size,
int direction)
{
dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
}
static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep,
dma_addr_t dma_handle,
size_t size,
int direction)
{
}
static void xilly_of_unmap(void *ptr)
{
struct xilly_mapping *data = ptr;
dma_unmap_single(data->device, data->dma_addr,
data->size, data->direction);
kfree(ptr);
}
static int xilly_map_single_of(struct xilly_endpoint *ep,
void *ptr,
size_t size,
int direction,
dma_addr_t *ret_dma_handle
)
{
dma_addr_t addr;
struct xilly_mapping *this;
int rc;
this = kzalloc(sizeof(*this), GFP_KERNEL);
if (!this)
return -ENOMEM;
addr = dma_map_single(ep->dev, ptr, size, direction);
if (dma_mapping_error(ep->dev, addr)) {
kfree(this);
return -ENODEV;
}
this->device = ep->dev;
this->dma_addr = addr;
this->size = size;
this->direction = direction;
*ret_dma_handle = addr;
rc = devm_add_action(ep->dev, xilly_of_unmap, this);
if (rc) {
dma_unmap_single(ep->dev, addr, size, direction);
kfree(this);
return rc;
}
return 0;
}
static struct xilly_endpoint_hardware of_hw = {
.owner = THIS_MODULE,
.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
.map_single = xilly_map_single_of,
};
static struct xilly_endpoint_hardware of_hw_coherent = {
.owner = THIS_MODULE,
.hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop,
.hw_sync_sgl_for_device = xilly_dma_sync_single_nop,
.map_single = xilly_map_single_of,
};
static int xilly_drv_probe(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint;
int rc;
int irq;
struct resource res;
struct xilly_endpoint_hardware *ephw = &of_hw;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
ephw = &of_hw_coherent;
endpoint = xillybus_init_endpoint(NULL, dev, ephw);
if (!endpoint)
return -ENOMEM;
dev_set_drvdata(dev, endpoint);
rc = of_address_to_resource(dev->of_node, 0, &res);
endpoint->registers = devm_ioremap_resource(dev, &res);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
irq = irq_of_parse_and_map(dev->of_node, 0);
rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint);
if (rc) {
dev_err(endpoint->dev,
"Failed to register IRQ handler. Aborting.\n");
return -ENODEV;
}
return xillybus_endpoint_discovery(endpoint);
}
static int xilly_drv_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
xillybus_endpoint_remove(endpoint);
return 0;
}
static struct platform_driver xillybus_platform_driver = {
.probe = xilly_drv_probe,
.remove = xilly_drv_remove,
.driver = {
.name = xillyname,
.owner = THIS_MODULE,
.of_match_table = xillybus_of_match,
},
};
module_platform_driver(xillybus_platform_driver);
| gpl-2.0 |
Eih3/CHIP-linux | drivers/dma/mxs-dma.c | 332 | 24209 | /*
* Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Refer to drivers/dma/imx-sdma.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <asm/irq.h>
#include "dmaengine.h"
/*
* NOTE: The term "PIO" throughout the mxs-dma implementation means
* PIO mode of mxs apbh-dma and apbx-dma. With this working mode,
* dma can program the controller registers of peripheral devices.
*/
#define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA)
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL 16
#define HW_APBHX_CTRL1 0x010
#define HW_APBHX_CTRL2 0x020
#define HW_APBHX_CHANNEL_CTRL 0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
/*
* The offset of NXTCMDAR register is different per both dma type and version,
* while stride for each channel is all the same 0x70.
*/
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
#define HW_APBHX_CHn_BAR(d, n) \
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70)
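/*
 * Worked example (illustrative): on an i.MX23 APBH controller (the "old"
 * layout), channel 4 has NXTCMDAR at 0x050 + 4 * 0x70 = 0x210 and SEMA at
 * 0x080 + 4 * 0x70 = 0x240; on i.MX28 the same channel uses the 0x110 and
 * 0x140 bases instead.
 */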
/*
* ccw bits definitions
*
* COMMAND: 0..1 (2)
* CHAIN: 2 (1)
* IRQ: 3 (1)
* NAND_LOCK: 4 (1) - not implemented
* NAND_WAIT4READY: 5 (1) - not implemented
* DEC_SEM: 6 (1)
* WAIT4END: 7 (1)
* HALT_ON_TERMINATE: 8 (1)
* TERMINATE_FLUSH: 9 (1)
* RESERVED: 10..11 (2)
* PIO_NUM: 12..15 (4)
*/
#define BP_CCW_COMMAND 0
#define BM_CCW_COMMAND (3 << 0)
#define CCW_CHAIN (1 << 2)
#define CCW_IRQ (1 << 3)
#define CCW_DEC_SEM (1 << 6)
#define CCW_WAIT4END (1 << 7)
#define CCW_HALT_ON_TERM (1 << 8)
#define CCW_TERM_FLUSH (1 << 9)
#define BP_CCW_PIO_NUM 12
#define BM_CCW_PIO_NUM (0xf << 12)
#define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field)
#define MXS_DMA_CMD_NO_XFER 0
#define MXS_DMA_CMD_WRITE 1
#define MXS_DMA_CMD_READ 2
#define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */
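/*
 * Example (illustrative only, mirroring what mxs_dma_prep_slave_sg() does
 * for the final descriptor of a device-to-memory chain when DMA_CTRL_ACK
 * is set):
 *
 *	ccw->bits = CCW_IRQ | CCW_DEC_SEM | CCW_WAIT4END |
 *		    CCW_HALT_ON_TERM | CCW_TERM_FLUSH |
 *		    BF_CCW(MXS_DMA_CMD_WRITE, COMMAND);
 *
 * i.e. a peripheral-to-memory (WRITE) command that raises an interrupt,
 * decrements the channel semaphore and waits for the transfer to end.
 */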
struct mxs_dma_ccw {
u32 next;
u16 bits;
u16 xfer_bytes;
#define MAX_XFER_BYTES 0xff00
u32 bufaddr;
#define MXS_PIO_WORDS 16
u32 pio_words[MXS_PIO_WORDS];
};
#define CCW_BLOCK_SIZE (4 * PAGE_SIZE)
#define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
struct mxs_dma_chan {
struct mxs_dma_engine *mxs_dma;
struct dma_chan chan;
struct dma_async_tx_descriptor desc;
struct tasklet_struct tasklet;
unsigned int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
int desc_count;
enum dma_status status;
unsigned int flags;
bool reset;
#define MXS_DMA_SG_LOOP (1 << 0)
#define MXS_DMA_USE_SEMAPHORE (1 << 1)
};
#define MXS_DMA_CHANNELS 16
#define MXS_DMA_CHANNELS_MASK 0xffff
enum mxs_dma_devtype {
MXS_DMA_APBH,
MXS_DMA_APBX,
};
enum mxs_dma_id {
IMX23_DMA,
IMX28_DMA,
};
struct mxs_dma_engine {
enum mxs_dma_id dev_id;
enum mxs_dma_devtype type;
void __iomem *base;
struct clk *clk;
struct dma_device dma_device;
struct device_dma_parameters dma_parms;
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
struct platform_device *pdev;
unsigned int nr_channels;
};
struct mxs_dma_type {
enum mxs_dma_id id;
enum mxs_dma_devtype type;
};
static struct mxs_dma_type mxs_dma_types[] = {
{
.id = IMX23_DMA,
.type = MXS_DMA_APBH,
}, {
.id = IMX23_DMA,
.type = MXS_DMA_APBX,
}, {
.id = IMX28_DMA,
.type = MXS_DMA_APBH,
}, {
.id = IMX28_DMA,
.type = MXS_DMA_APBX,
}
};
static struct platform_device_id mxs_dma_ids[] = {
{
.name = "imx23-dma-apbh",
.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
}, {
.name = "imx23-dma-apbx",
.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
}, {
.name = "imx28-dma-apbh",
.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
}, {
.name = "imx28-dma-apbx",
.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
}, {
/* end of list */
}
};
static const struct of_device_id mxs_dma_dt_ids[] = {
{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
return container_of(chan, struct mxs_dma_chan, chan);
}
static void mxs_dma_reset_chan(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
/*
* mxs dma channel resets can cause a channel stall. To recover from a
* channel stall, we have to reset the whole DMA engine. To avoid this,
* we use cyclic DMA with semaphores that are re-armed (incremented) in
* mxs_dma_int_handler. To reset the channel, we simply stop writing
* into the semaphore counter.
*/
if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
mxs_chan->flags & MXS_DMA_SG_LOOP) {
mxs_chan->reset = true;
} else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
} else {
unsigned long elapsed = 0;
const unsigned long max_wait = 50000; /* 50ms */
void __iomem *reg_dbg1 = mxs_dma->base +
HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);
/*
* On i.MX28 APBX, the DMA channel can stop working if we reset
* the channel while it is in READ_FLUSH (0x08) state.
* We wait here until we leave the state. Then we trigger the
* reset. Waiting a maximum of 50ms, the kernel shouldn't crash
* because of this.
*/
while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
udelay(100);
elapsed += 100;
}
if (elapsed >= max_wait)
dev_err(&mxs_chan->mxs_dma->pdev->dev,
"Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
chan_id);
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}
mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_enable_chan(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
/* write 1 to SEMA to kick off the channel */
if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
mxs_chan->flags & MXS_DMA_SG_LOOP) {
/* A cyclic DMA consists of at least 2 segments, so initialize
* the semaphore with 2 so we have enough time to add 1 to the
* semaphore if we need to */
writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
} else {
writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}
mxs_chan->reset = false;
}
static void mxs_dma_disable_chan(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
mxs_chan->status = DMA_COMPLETE;
}
static int mxs_dma_pause_chan(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
/* freeze the channel */
if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << chan_id,
mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
mxs_chan->status = DMA_PAUSED;
return 0;
}
static int mxs_dma_resume_chan(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
/* unfreeze the channel */
if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
else
writel(1 << chan_id,
mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
mxs_chan->status = DMA_IN_PROGRESS;
return 0;
}
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
return dma_cookie_assign(tx);
}
static void mxs_dma_tasklet(unsigned long data)
{
struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
if (mxs_chan->desc.callback)
mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
int i;
for (i = 0; i != mxs_dma->nr_channels; ++i)
if (mxs_dma->mxs_chans[i].chan_irq == irq)
return i;
return -EINVAL;
}
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
struct mxs_dma_engine *mxs_dma = dev_id;
struct mxs_dma_chan *mxs_chan;
u32 completed;
u32 err;
int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
if (chan < 0)
return IRQ_NONE;
/* completion status */
completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
completed = (completed >> chan) & 0x1;
/* Clear interrupt */
writel((1 << chan),
mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
/* error status */
err = readl(mxs_dma->base + HW_APBHX_CTRL2);
err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);
/*
* error status bit is in the upper 16 bits, error irq bit in the lower
* 16 bits. We transform it into a simpler error code:
* err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
*/
err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);
/* Clear error irq */
writel((1 << chan),
mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
/*
* When both the completion bit and the termination error bit are set at
* the same time, we do not treat it as an error. In other words, it only
* becomes an error we need to handle here if it is a bus error, or a
* termination error without a completion. 0x01 is the termination error,
* so subtracting (err & completed) leaves only the real error cases.
*/
err -= err & completed;
mxs_chan = &mxs_dma->mxs_chans[chan];
if (err) {
dev_dbg(mxs_dma->dma_device.dev,
"%s: error in channel %d\n", __func__,
chan);
mxs_chan->status = DMA_ERROR;
mxs_dma_reset_chan(&mxs_chan->chan);
} else if (mxs_chan->status != DMA_COMPLETE) {
if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
mxs_chan->status = DMA_IN_PROGRESS;
if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
writel(1, mxs_dma->base +
HW_APBHX_CHn_SEMA(mxs_dma, chan));
} else {
mxs_chan->status = DMA_COMPLETE;
}
}
if (mxs_chan->status == DMA_COMPLETE) {
if (mxs_chan->reset)
return IRQ_HANDLED;
dma_cookie_complete(&mxs_chan->desc);
}
/* schedule tasklet on this channel */
tasklet_schedule(&mxs_chan->tasklet);
return IRQ_HANDLED;
}
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
CCW_BLOCK_SIZE,
&mxs_chan->ccw_phys, GFP_KERNEL);
if (!mxs_chan->ccw) {
ret = -ENOMEM;
goto err_alloc;
}
if (mxs_chan->chan_irq != NO_IRQ) {
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
0, "mxs-dma", mxs_dma);
if (ret)
goto err_irq;
}
ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
goto err_clk;
mxs_dma_reset_chan(chan);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
/* the descriptor is ready */
async_tx_ack(&mxs_chan->desc);
return 0;
err_clk:
free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
return ret;
}
static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
mxs_dma_disable_chan(chan);
free_irq(mxs_chan->chan_irq, mxs_dma);
dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
clk_disable_unprepare(mxs_dma->clk);
}
/*
* How to use the flags for ->device_prep_slave_sg() :
* [1] If there is only one DMA command in the DMA chain, the code should be:
* ......
* ->device_prep_slave_sg(DMA_CTRL_ACK);
* ......
* [2] If there are two DMA commands in the DMA chain, the code should be
* ......
* ->device_prep_slave_sg(0);
* ......
* ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
* ......
* [3] If there are more than two DMA commands in the DMA chain, the code
* should be:
* ......
* ->device_prep_slave_sg(0); // First
* ......
* ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
* ......
* ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
* ......
*/
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
struct mxs_dma_ccw *ccw;
struct scatterlist *sg;
u32 i, j;
u32 *pio;
bool append = flags & DMA_PREP_INTERRUPT;
int idx = append ? mxs_chan->desc_count : 0;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
return NULL;
if (sg_len + (append ? idx : 0) > NUM_CCW) {
dev_err(mxs_dma->dma_device.dev,
"maximum number of sg exceeded: %d > %d\n",
sg_len, NUM_CCW);
goto err_out;
}
mxs_chan->status = DMA_IN_PROGRESS;
mxs_chan->flags = 0;
/*
* If the sg is prepared with append flag set, the sg
* will be appended to the last prepared sg.
*/
if (append) {
BUG_ON(idx < 1);
ccw = &mxs_chan->ccw[idx - 1];
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
ccw->bits |= CCW_CHAIN;
ccw->bits &= ~CCW_IRQ;
ccw->bits &= ~CCW_DEC_SEM;
} else {
idx = 0;
}
if (direction == DMA_TRANS_NONE) {
ccw = &mxs_chan->ccw[idx++];
pio = (u32 *) sgl;
for (j = 0; j < sg_len;)
ccw->pio_words[j++] = *pio++;
ccw->bits = 0;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
if (flags & DMA_CTRL_ACK)
ccw->bits |= CCW_WAIT4END;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= BF_CCW(sg_len, PIO_NUM);
ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
} else {
for_each_sg(sgl, sg, sg_len, i) {
if (sg_dma_len(sg) > MAX_XFER_BYTES) {
dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
sg_dma_len(sg), MAX_XFER_BYTES);
goto err_out;
}
ccw = &mxs_chan->ccw[idx++];
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
ccw->bufaddr = sg->dma_address;
ccw->xfer_bytes = sg_dma_len(sg);
ccw->bits = 0;
ccw->bits |= CCW_CHAIN;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
COMMAND);
if (i + 1 == sg_len) {
ccw->bits &= ~CCW_CHAIN;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_DEC_SEM;
if (flags & DMA_CTRL_ACK)
ccw->bits |= CCW_WAIT4END;
}
}
}
mxs_chan->desc_count = idx;
return &mxs_chan->desc;
err_out:
mxs_chan->status = DMA_ERROR;
return NULL;
}
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
u32 num_periods = buf_len / period_len;
u32 i = 0, buf = 0;
if (mxs_chan->status == DMA_IN_PROGRESS)
return NULL;
mxs_chan->status = DMA_IN_PROGRESS;
mxs_chan->flags |= MXS_DMA_SG_LOOP;
mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;
if (num_periods > NUM_CCW) {
dev_err(mxs_dma->dma_device.dev,
"maximum number of sg exceeded: %d > %d\n",
num_periods, NUM_CCW);
goto err_out;
}
if (period_len > MAX_XFER_BYTES) {
dev_err(mxs_dma->dma_device.dev,
"maximum period size exceeded: %d > %d\n",
period_len, MAX_XFER_BYTES);
goto err_out;
}
while (buf < buf_len) {
struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
if (i + 1 == num_periods)
ccw->next = mxs_chan->ccw_phys;
else
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
ccw->bufaddr = dma_addr;
ccw->xfer_bytes = period_len;
ccw->bits = 0;
ccw->bits |= CCW_CHAIN;
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= CCW_DEC_SEM;
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
dma_addr += period_len;
buf += period_len;
i++;
}
mxs_chan->desc_count = i;
return &mxs_chan->desc;
err_out:
mxs_chan->status = DMA_ERROR;
return NULL;
}
static int mxs_dma_terminate_all(struct dma_chan *chan)
{
mxs_dma_reset_chan(chan);
mxs_dma_disable_chan(chan);
return 0;
}
static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
u32 residue = 0;
if (mxs_chan->status == DMA_IN_PROGRESS &&
mxs_chan->flags & MXS_DMA_SG_LOOP) {
struct mxs_dma_ccw *last_ccw;
u32 bar;
last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
residue = last_ccw->xfer_bytes + last_ccw->bufaddr;
bar = readl(mxs_dma->base +
HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
residue -= bar;
}
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
residue);
return mxs_chan->status;
}
static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
return ret;
ret = stmp_reset_block(mxs_dma->base);
if (ret)
goto err_out;
/* enable apbh burst */
if (dma_is_apbh(mxs_dma)) {
writel(BM_APBH_CTRL0_APB_BURST_EN,
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_APBH_CTRL0_APB_BURST8_EN,
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
}
/* enable irq for all the channels */
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
err_out:
clk_disable_unprepare(mxs_dma->clk);
return ret;
}
struct mxs_dma_filter_param {
struct device_node *of_node;
unsigned int chan_id;
};
static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
struct mxs_dma_filter_param *param = fn_param;
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_irq;
if (mxs_dma->dma_device.dev->of_node != param->of_node)
return false;
if (chan->chan_id != param->chan_id)
return false;
chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
if (chan_irq < 0)
return false;
mxs_chan->chan_irq = chan_irq;
return true;
}
static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
struct mxs_dma_filter_param param;
if (dma_spec->args_count != 1)
return NULL;
param.of_node = ofdma->of_node;
param.chan_id = dma_spec->args[0];
if (param.chan_id >= mxs_dma->nr_channels)
return NULL;
return dma_request_channel(mask, mxs_dma_filter_fn, &param);
}
static int __init mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct platform_device_id *id_entry;
const struct of_device_id *of_id;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
int ret, i;
mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
if (!mxs_dma)
return -ENOMEM;
ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
if (ret) {
dev_err(&pdev->dev, "failed to read dma-channels\n");
return ret;
}
of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
if (of_id)
id_entry = of_id->data;
else
id_entry = platform_get_device_id(pdev);
dma_type = (struct mxs_dma_type *)id_entry->driver_data;
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(mxs_dma->base))
return PTR_ERR(mxs_dma->base);
mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mxs_dma->clk))
return PTR_ERR(mxs_dma->clk);
dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
/* Initialize channel parameters */
for (i = 0; i < MXS_DMA_CHANNELS; i++) {
struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
mxs_chan->mxs_dma = mxs_dma;
mxs_chan->chan.device = &mxs_dma->dma_device;
dma_cookie_init(&mxs_chan->chan);
tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
(unsigned long) mxs_chan);
/* Add the channel to mxs_chan list */
list_add_tail(&mxs_chan->chan.device_node,
&mxs_dma->dma_device.channels);
}
ret = mxs_dma_init(mxs_dma);
if (ret)
return ret;
mxs_dma->pdev = pdev;
mxs_dma->dma_device.dev = &pdev->dev;
/* mxs_dma gets 65535 bytes maximum sg size */
mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
ret = dma_async_device_register(&mxs_dma->dma_device);
if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n");
return ret;
}
ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
if (ret) {
dev_err(mxs_dma->dma_device.dev,
"failed to register controller\n");
dma_async_device_unregister(&mxs_dma->dma_device);
}
dev_info(mxs_dma->dma_device.dev, "initialized\n");
return 0;
}
static struct platform_driver mxs_dma_driver = {
.driver = {
.name = "mxs-dma",
.of_match_table = mxs_dma_dt_ids,
},
.id_table = mxs_dma_ids,
};
static int __init mxs_dma_module_init(void)
{
return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);
| gpl-2.0 |
talnoah/Sprint-One-4.2.2-Sense | drivers/mfd/pm8xxx-misc.c | 332 | 27017 | /*
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mfd/pm8xxx/core.h>
#include <linux/mfd/pm8xxx/misc.h>
#define REG_PM8XXX_PON_CTRL_1 0x01C
#define PON_CTRL_1_PULL_UP_MASK 0xE0
#define PON_CTRL_1_USB_PWR_EN 0x10
#define PON_CTRL_1_WD_EN_MASK 0x08
#define PON_CTRL_1_WD_EN_RESET 0x08
#define PON_CTRL_1_WD_EN_PWR_OFF 0x00
#define REG_PM8058_PON_CNTL_4 0x098
#define REG_PM8901_PON_CNTL_4 0x099
#define REG_PM8018_PON_CNTL_4 0x01E
#define REG_PM8921_PON_CNTL_4 0x01E
#define REG_PM8058_PON_CNTL_5 0x07B
#define REG_PM8901_PON_CNTL_5 0x09A
#define REG_PM8018_PON_CNTL_5 0x01F
#define REG_PM8921_PON_CNTL_5 0x01F
#define PON_CTRL_4_RESET_EN_MASK 0x01
#define PON_CTRL_4_SHUTDOWN_ON_RESET 0x0
#define PON_CTRL_4_RESTART_ON_RESET 0x1
#define PON_CTRL_5_HARD_RESET_EN_MASK 0x08
#define PON_CTRL_5_HARD_RESET_EN 0x08
#define PON_CTRL_5_HARD_RESET_DIS 0x00
#define REG_PM8058_VREG_EN_MSM 0x018
#define REG_PM8058_VREG_EN_GRP_5_4 0x1C8
#define REG_PM8058_S0_CTRL 0x004
#define REG_PM8058_S1_CTRL 0x005
#define REG_PM8058_S3_CTRL 0x111
#define REG_PM8058_L21_CTRL 0x120
#define REG_PM8058_L22_CTRL 0x121
#define PM8058_REGULATOR_ENABLE_MASK 0x80
#define PM8058_REGULATOR_ENABLE 0x80
#define PM8058_REGULATOR_DISABLE 0x00
#define PM8058_REGULATOR_PULL_DOWN_MASK 0x40
#define PM8058_REGULATOR_PULL_DOWN_EN 0x40
#define PM8058_SMPS_LEGACY_VREF_SEL 0x20
#define PM8058_SMPS_LEGACY_VPROG_MASK 0x1F
#define PM8058_SMPS_ADVANCED_BAND_MASK 0xC0
#define PM8058_SMPS_ADVANCED_BAND_SHIFT 6
#define PM8058_SMPS_ADVANCED_VPROG_MASK 0x3F
#define REG_PM8058_S0_TEST2 0x084
#define REG_PM8058_S1_TEST2 0x085
#define REG_PM8058_S3_TEST2 0x11A
#define PM8058_REGULATOR_BANK_WRITE 0x80
#define PM8058_REGULATOR_BANK_MASK 0x70
#define PM8058_REGULATOR_BANK_SHIFT 4
#define PM8058_REGULATOR_BANK_SEL(n) ((n) << PM8058_REGULATOR_BANK_SHIFT)
#define PM8058_SMPS_LEGACY_VLOW_SEL 0x01
#define PM8058_SMPS_ADVANCED_MODE_MASK 0x02
#define PM8058_SMPS_ADVANCED_MODE 0x02
#define PM8058_SMPS_LEGACY_MODE 0x00
#define REG_PM8058_SLEEP_CTRL 0x02B
#define REG_PM8921_SLEEP_CTRL 0x10A
#define REG_PM8018_SLEEP_CTRL 0x10A
#define SLEEP_CTRL_SMPL_EN_MASK 0x04
#define SLEEP_CTRL_SMPL_EN_RESET 0x04
#define SLEEP_CTRL_SMPL_EN_PWR_OFF 0x00
#define SLEEP_CTRL_SMPL_SEL_MASK 0x03
#define SLEEP_CTRL_SMPL_SEL_MIN 0
#define SLEEP_CTRL_SMPL_SEL_MAX 3
#define REG_PM8901_REGULATOR_S1_PMR 0xA7
#define REG_PM8901_REGULATOR_S2_PMR 0xA8
#define REG_PM8901_REGULATOR_S3_PMR 0xA9
#define REG_PM8901_REGULATOR_S4_PMR 0xAA
#define PM8901_REGULATOR_PMR_STATE_MASK 0x60
#define PM8901_REGULATOR_PMR_STATE_OFF 0x20
#define REG_PM8058_COIN_CHG 0x02F
#define REG_PM8921_COIN_CHG 0x09C
#define REG_PM8018_COIN_CHG 0x09C
#define COINCELL_RESISTOR_SHIFT 0x2
#define REG_PM8XXX_GP_TEST_1 0x07A
#define PM8XXX_STAY_ON_CFG 0x92
#define REG_PM8XXX_GPIO_MUX_CTRL 0x1CC
#define UART_PATH_SEL_MASK 0x60
#define UART_PATH_SEL_SHIFT 0x5
#define USB_ID_PU_EN_MASK 0x10
#define USB_ID_PU_EN_SHIFT 4
#define PM8901_DELAY_AFTER_REG_DISABLE_MS 4
#define PM8901_DELAY_BEFORE_SHUTDOWN_MS 8
#define REG_PM8XXX_XO_CNTRL_2 0x114
#define MP3_1_MASK 0xE0
#define MP3_2_MASK 0x1C
#define MP3_1_SHIFT 5
#define MP3_2_SHIFT 2
#define REG_HSED_BIAS0_CNTL2 0xA1
#define REG_HSED_BIAS1_CNTL2 0x135
#define REG_HSED_BIAS2_CNTL2 0x138
#define HSED_EN_MASK 0xC0
struct pm8xxx_misc_chip {
struct list_head link;
struct pm8xxx_misc_platform_data pdata;
struct device *dev;
enum pm8xxx_version version;
u64 osc_halt_count;
};
static LIST_HEAD(pm8xxx_misc_chips);
static DEFINE_SPINLOCK(pm8xxx_misc_chips_lock);
static int pm8xxx_misc_masked_write(struct pm8xxx_misc_chip *chip, u16 addr,
u8 mask, u8 val)
{
int rc;
u8 reg;
rc = pm8xxx_readb(chip->dev->parent, addr, &reg);
if (rc) {
pr_err("pm8xxx_readb(0x%03X) failed, rc=%d\n", addr, rc);
return rc;
}
reg &= ~mask;
reg |= val & mask;
rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
if (rc)
pr_err("pm8xxx_writeb(0x%03X)=0x%02X failed, rc=%d\n", addr,
reg, rc);
return rc;
}
static int
__pm8058_disable_smps_locally_set_pull_down(struct pm8xxx_misc_chip *chip,
u16 ctrl_addr, u16 test2_addr, u16 master_enable_addr,
u8 master_enable_bit)
{
int rc = 0;
u8 vref_sel, vlow_sel, band, vprog, bank, reg;
bank = PM8058_REGULATOR_BANK_SEL(7);
rc = pm8xxx_writeb(chip->dev->parent, test2_addr, bank);
if (rc) {
pr_err("%s: pm8xxx_writeb(0x%03X) failed: rc=%d\n", __func__,
test2_addr, rc);
goto done;
}
rc = pm8xxx_readb(chip->dev->parent, test2_addr, &reg);
if (rc) {
pr_err("%s: FAIL pm8xxx_readb(0x%03X): rc=%d\n",
__func__, test2_addr, rc);
goto done;
}
if ((reg & PM8058_SMPS_ADVANCED_MODE_MASK) ==
PM8058_SMPS_ADVANCED_MODE) {
rc = pm8xxx_readb(chip->dev->parent, ctrl_addr, &reg);
if (rc) {
pr_err("%s: FAIL pm8xxx_readb(0x%03X): rc=%d\n",
__func__, ctrl_addr, rc);
goto done;
}
band = (reg & PM8058_SMPS_ADVANCED_BAND_MASK)
>> PM8058_SMPS_ADVANCED_BAND_SHIFT;
switch (band) {
case 3:
vref_sel = 0;
vlow_sel = 0;
break;
case 2:
vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
vlow_sel = 0;
break;
case 1:
vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
vlow_sel = PM8058_SMPS_LEGACY_VLOW_SEL;
break;
default:
pr_err("%s: regulator already disabled\n", __func__);
return -EPERM;
}
vprog = (reg & PM8058_SMPS_ADVANCED_VPROG_MASK);
vprog = (vprog + 1) >> 1;
if (vprog > PM8058_SMPS_LEGACY_VPROG_MASK)
vprog = PM8058_SMPS_LEGACY_VPROG_MASK;
bank = PM8058_REGULATOR_BANK_SEL(1);
rc = pm8xxx_writeb(chip->dev->parent, test2_addr, bank);
if (rc) {
pr_err("%s: FAIL pm8xxx_writeb(0x%03X): rc=%d\n",
__func__, test2_addr, rc);
goto done;
}
rc = pm8xxx_misc_masked_write(chip, test2_addr,
PM8058_REGULATOR_BANK_WRITE | PM8058_REGULATOR_BANK_MASK
| PM8058_SMPS_LEGACY_VLOW_SEL,
PM8058_REGULATOR_BANK_WRITE |
PM8058_REGULATOR_BANK_SEL(1) | vlow_sel);
if (rc)
goto done;
bank = PM8058_REGULATOR_BANK_SEL(7);
rc = pm8xxx_writeb(chip->dev->parent, test2_addr, bank);
if (rc) {
pr_err("%s: FAIL pm8xxx_writeb(0x%03X): rc=%d\n",
__func__, test2_addr, rc);
goto done;
}
rc = pm8xxx_misc_masked_write(chip, test2_addr,
PM8058_REGULATOR_BANK_WRITE |
PM8058_REGULATOR_BANK_MASK |
PM8058_SMPS_ADVANCED_MODE_MASK,
PM8058_REGULATOR_BANK_WRITE |
PM8058_REGULATOR_BANK_SEL(7) |
PM8058_SMPS_LEGACY_MODE);
if (rc)
goto done;
rc = pm8xxx_misc_masked_write(chip, ctrl_addr,
PM8058_REGULATOR_ENABLE_MASK |
PM8058_REGULATOR_PULL_DOWN_MASK |
PM8058_SMPS_LEGACY_VREF_SEL |
PM8058_SMPS_LEGACY_VPROG_MASK,
PM8058_REGULATOR_ENABLE | PM8058_REGULATOR_PULL_DOWN_EN
| vref_sel | vprog);
if (rc)
goto done;
}
rc = pm8xxx_misc_masked_write(chip, master_enable_addr,
master_enable_bit, master_enable_bit);
if (rc)
goto done;
rc = pm8xxx_misc_masked_write(chip, ctrl_addr,
PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
done:
return rc;
}
static int
__pm8058_disable_ldo_locally_set_pull_down(struct pm8xxx_misc_chip *chip,
u16 ctrl_addr, u16 master_enable_addr, u8 master_enable_bit)
{
int rc;
rc = pm8xxx_misc_masked_write(chip, master_enable_addr,
master_enable_bit, master_enable_bit);
if (rc)
goto done;
rc = pm8xxx_misc_masked_write(chip, ctrl_addr,
PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
done:
return rc;
}
static int __pm8018_reset_pwr_off(struct pm8xxx_misc_chip *chip, int reset)
{
int rc;
rc = pm8xxx_misc_masked_write(chip, REG_PM8018_SLEEP_CTRL,
SLEEP_CTRL_SMPL_EN_MASK,
(reset ? SLEEP_CTRL_SMPL_EN_RESET : SLEEP_CTRL_SMPL_EN_PWR_OFF));
if (rc) {
pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
return rc;
}
rc = pm8xxx_misc_masked_write(chip, REG_PM8XXX_PON_CTRL_1,
PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
| PON_CTRL_1_WD_EN_MASK,
PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
| (reset ? PON_CTRL_1_WD_EN_RESET : PON_CTRL_1_WD_EN_PWR_OFF));
if (rc)
pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
return rc;
}
static int __pm8058_reset_pwr_off(struct pm8xxx_misc_chip *chip, int reset)
{
int rc;
if (!reset) {
__pm8058_disable_smps_locally_set_pull_down(chip,
REG_PM8058_S0_CTRL, REG_PM8058_S0_TEST2,
REG_PM8058_VREG_EN_MSM, BIT(7));
__pm8058_disable_smps_locally_set_pull_down(chip,
REG_PM8058_S1_CTRL, REG_PM8058_S1_TEST2,
REG_PM8058_VREG_EN_MSM, BIT(6));
__pm8058_disable_smps_locally_set_pull_down(chip,
REG_PM8058_S3_CTRL, REG_PM8058_S3_TEST2,
REG_PM8058_VREG_EN_GRP_5_4, BIT(7) | BIT(4));
__pm8058_disable_ldo_locally_set_pull_down(chip,
REG_PM8058_L21_CTRL, REG_PM8058_VREG_EN_GRP_5_4,
BIT(1));
}
rc = pm8xxx_misc_masked_write(chip, REG_PM8058_L22_CTRL, 0xBF, 0x93);
if (rc) {
pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
goto read_write_err;
}
rc = pm8xxx_misc_masked_write(chip, REG_PM8058_SLEEP_CTRL,
SLEEP_CTRL_SMPL_EN_MASK,
(reset ? SLEEP_CTRL_SMPL_EN_RESET : SLEEP_CTRL_SMPL_EN_PWR_OFF));
if (rc) {
pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
goto read_write_err;
}
rc = pm8xxx_misc_masked_write(chip, REG_PM8XXX_PON_CTRL_1,
PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
| PON_CTRL_1_WD_EN_MASK,
PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
| (reset ? PON_CTRL_1_WD_EN_RESET : PON_CTRL_1_WD_EN_PWR_OFF));
if (rc) {
pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
goto read_write_err;
}
read_write_err:
return rc;
}
static int __pm8901_reset_pwr_off(struct pm8xxx_misc_chip *chip, int reset)
{
int rc = 0, i;
u8 pmr_addr[4] = {
REG_PM8901_REGULATOR_S2_PMR,
REG_PM8901_REGULATOR_S3_PMR,
REG_PM8901_REGULATOR_S4_PMR,
REG_PM8901_REGULATOR_S1_PMR,
};
if (!reset) {
for (i = 0; i < 4; i++) {
rc = pm8xxx_misc_masked_write(chip, pmr_addr[i],
PM8901_REGULATOR_PMR_STATE_MASK,
PM8901_REGULATOR_PMR_STATE_OFF);
if (rc) {
pr_err("pm8xxx_misc_masked_write failed, "
"rc=%d\n", rc);
goto read_write_err;
}
mdelay(PM8901_DELAY_AFTER_REG_DISABLE_MS);
}
}
read_write_err:
mdelay(PM8901_DELAY_BEFORE_SHUTDOWN_MS);
return rc;
}
static int __pm8921_reset_pwr_off(struct pm8xxx_misc_chip *chip, int reset)
{
int rc;
rc = pm8xxx_misc_masked_write(chip, REG_PM8921_SLEEP_CTRL,
SLEEP_CTRL_SMPL_EN_MASK,
(reset ? SLEEP_CTRL_SMPL_EN_RESET : SLEEP_CTRL_SMPL_EN_PWR_OFF));
if (rc) {
pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
goto read_write_err;
}
rc = pm8xxx_misc_masked_write(chip, REG_PM8XXX_PON_CTRL_1,
PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
| PON_CTRL_1_WD_EN_MASK,
PON_CTRL_1_PULL_UP_MASK | PON_CTRL_1_USB_PWR_EN
| (reset ? PON_CTRL_1_WD_EN_RESET : PON_CTRL_1_WD_EN_PWR_OFF));
if (rc) {
pr_err("pm8xxx_misc_masked_write failed, rc=%d\n", rc);
goto read_write_err;
}
read_write_err:
return rc;
}
int pm8xxx_reset_pwr_off(int reset)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
rc = __pm8018_reset_pwr_off(chip, reset);
break;
case PM8XXX_VERSION_8058:
rc = __pm8058_reset_pwr_off(chip, reset);
break;
case PM8XXX_VERSION_8901:
rc = __pm8901_reset_pwr_off(chip, reset);
break;
case PM8XXX_VERSION_8038:
case PM8XXX_VERSION_8917:
case PM8XXX_VERSION_8921:
rc = __pm8921_reset_pwr_off(chip, reset);
break;
default:
break;
}
if (rc) {
pr_err("reset_pwr_off failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(pm8xxx_reset_pwr_off);
int pm8xxx_smpl_control(int enable)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8018_SLEEP_CTRL, SLEEP_CTRL_SMPL_EN_MASK,
(enable ? SLEEP_CTRL_SMPL_EN_RESET
: SLEEP_CTRL_SMPL_EN_PWR_OFF));
break;
case PM8XXX_VERSION_8058:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8058_SLEEP_CTRL, SLEEP_CTRL_SMPL_EN_MASK,
(enable ? SLEEP_CTRL_SMPL_EN_RESET
: SLEEP_CTRL_SMPL_EN_PWR_OFF));
break;
case PM8XXX_VERSION_8921:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8921_SLEEP_CTRL, SLEEP_CTRL_SMPL_EN_MASK,
(enable ? SLEEP_CTRL_SMPL_EN_RESET
: SLEEP_CTRL_SMPL_EN_PWR_OFF));
break;
default:
break;
}
if (rc) {
pr_err("setting smpl control failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_smpl_control);
int pm8xxx_smpl_set_delay(enum pm8xxx_smpl_delay delay)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
if (delay < SLEEP_CTRL_SMPL_SEL_MIN
|| delay > SLEEP_CTRL_SMPL_SEL_MAX) {
pr_err("%s: invalid delay specified: %d\n", __func__, delay);
return -EINVAL;
}
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8018_SLEEP_CTRL, SLEEP_CTRL_SMPL_SEL_MASK,
delay);
break;
case PM8XXX_VERSION_8058:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8058_SLEEP_CTRL, SLEEP_CTRL_SMPL_SEL_MASK,
delay);
break;
case PM8XXX_VERSION_8921:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8921_SLEEP_CTRL, SLEEP_CTRL_SMPL_SEL_MASK,
delay);
break;
default:
break;
}
if (rc) {
pr_err("setting smpl delay failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_smpl_set_delay);
int pm8xxx_coincell_chg_config(struct pm8xxx_coincell_chg *chg_config)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
u8 reg = 0, voltage, resistor;
int rc = 0;
if (chg_config == NULL) {
pr_err("chg_config is NULL\n");
return -EINVAL;
}
voltage = chg_config->voltage;
resistor = chg_config->resistor;
if (resistor < PM8XXX_COINCELL_RESISTOR_2100_OHMS ||
resistor > PM8XXX_COINCELL_RESISTOR_800_OHMS) {
pr_err("Invalid resistor value provided\n");
return -EINVAL;
}
if (voltage < PM8XXX_COINCELL_VOLTAGE_3p2V ||
(voltage > PM8XXX_COINCELL_VOLTAGE_3p0V &&
voltage != PM8XXX_COINCELL_VOLTAGE_2p5V)) {
pr_err("Invalid voltage value provided\n");
return -EINVAL;
}
if (chg_config->state == PM8XXX_COINCELL_CHG_DISABLE) {
reg = 0;
} else {
reg |= voltage;
reg |= (resistor << COINCELL_RESISTOR_SHIFT);
}
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
rc = pm8xxx_writeb(chip->dev->parent,
REG_PM8018_COIN_CHG, reg);
break;
case PM8XXX_VERSION_8058:
rc = pm8xxx_writeb(chip->dev->parent,
REG_PM8058_COIN_CHG, reg);
break;
case PM8XXX_VERSION_8921:
rc = pm8xxx_writeb(chip->dev->parent,
REG_PM8921_COIN_CHG, reg);
break;
default:
break;
}
if (rc) {
pr_err("coincell chg. config failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_coincell_chg_config);
int pm8xxx_watchdog_reset_control(int enable)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
case PM8XXX_VERSION_8058:
case PM8XXX_VERSION_8921:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8XXX_PON_CTRL_1, PON_CTRL_1_WD_EN_MASK,
(enable ? PON_CTRL_1_WD_EN_RESET
: PON_CTRL_1_WD_EN_PWR_OFF));
break;
default:
break;
}
if (rc) {
pr_err("setting WD reset control failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_watchdog_reset_control);
int pm8xxx_stay_on(void)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
case PM8XXX_VERSION_8058:
case PM8XXX_VERSION_8921:
rc = pm8xxx_writeb(chip->dev->parent,
REG_PM8XXX_GP_TEST_1, PM8XXX_STAY_ON_CFG);
break;
default:
break;
}
if (rc) {
pr_err("stay_on failed failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_stay_on);
static int
__pm8xxx_hard_reset_config(struct pm8xxx_misc_chip *chip,
enum pm8xxx_pon_config config, u16 pon4_addr, u16 pon5_addr)
{
int rc = 0;
switch (config) {
case PM8XXX_DISABLE_HARD_RESET:
rc = pm8xxx_misc_masked_write(chip, pon5_addr,
PON_CTRL_5_HARD_RESET_EN_MASK,
PON_CTRL_5_HARD_RESET_DIS);
break;
case PM8XXX_SHUTDOWN_ON_HARD_RESET:
rc = pm8xxx_misc_masked_write(chip, pon5_addr,
PON_CTRL_5_HARD_RESET_EN_MASK,
PON_CTRL_5_HARD_RESET_EN);
if (!rc) {
rc = pm8xxx_misc_masked_write(chip, pon4_addr,
PON_CTRL_4_RESET_EN_MASK,
PON_CTRL_4_SHUTDOWN_ON_RESET);
}
break;
case PM8XXX_RESTART_ON_HARD_RESET:
rc = pm8xxx_misc_masked_write(chip, pon5_addr,
PON_CTRL_5_HARD_RESET_EN_MASK,
PON_CTRL_5_HARD_RESET_EN);
if (!rc) {
rc = pm8xxx_misc_masked_write(chip, pon4_addr,
PON_CTRL_4_RESET_EN_MASK,
PON_CTRL_4_RESTART_ON_RESET);
}
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
int pm8xxx_hard_reset_config(enum pm8xxx_pon_config config)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
rc = __pm8xxx_hard_reset_config(chip, config,
REG_PM8018_PON_CNTL_4, REG_PM8018_PON_CNTL_5);
break;
case PM8XXX_VERSION_8058:
rc = __pm8xxx_hard_reset_config(chip, config,
REG_PM8058_PON_CNTL_4, REG_PM8058_PON_CNTL_5);
break;
case PM8XXX_VERSION_8901:
rc = __pm8xxx_hard_reset_config(chip, config,
REG_PM8901_PON_CNTL_4, REG_PM8901_PON_CNTL_5);
break;
case PM8XXX_VERSION_8921:
rc = __pm8xxx_hard_reset_config(chip, config,
REG_PM8921_PON_CNTL_4, REG_PM8921_PON_CNTL_5);
break;
default:
break;
}
if (rc) {
pr_err("hard reset config. failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_hard_reset_config);
static irqreturn_t pm8xxx_osc_halt_isr(int irq, void *data)
{
struct pm8xxx_misc_chip *chip = data;
u64 count = 0;
if (chip) {
chip->osc_halt_count++;
count = chip->osc_halt_count;
}
pr_crit("%s: OSC_HALT interrupt has triggered, 32 kHz XTAL oscillator"
" has halted (%llu)!\n", __func__, count);
return IRQ_HANDLED;
}
int pm8xxx_uart_gpio_mux_ctrl(enum pm8xxx_uart_path_sel uart_path_sel)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8018:
case PM8XXX_VERSION_8058:
case PM8XXX_VERSION_8921:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8XXX_GPIO_MUX_CTRL, UART_PATH_SEL_MASK,
uart_path_sel << UART_PATH_SEL_SHIFT);
break;
default:
break;
}
if (rc) {
pr_err("uart_gpio_mux_ctrl failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_uart_gpio_mux_ctrl);
int pm8xxx_usb_id_pullup(int enable)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = -ENXIO;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8921:
case PM8XXX_VERSION_8922:
case PM8XXX_VERSION_8917:
case PM8XXX_VERSION_8038:
rc = pm8xxx_misc_masked_write(chip,
REG_PM8XXX_GPIO_MUX_CTRL, USB_ID_PU_EN_MASK,
enable << USB_ID_PU_EN_SHIFT);
if (rc)
pr_err("Fail: reg=%x, rc=%d\n",
REG_PM8XXX_GPIO_MUX_CTRL, rc);
break;
default:
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_usb_id_pullup);
static int __pm8901_preload_dVdd(struct pm8xxx_misc_chip *chip)
{
int rc;
if (pm8xxx_get_revision(chip->dev->parent) >= PM8XXX_REVISION_8901_2p3)
return 0;
rc = pm8xxx_writeb(chip->dev->parent, 0x0BD, 0x0F);
if (rc)
pr_err("pm8xxx_writeb failed for 0x0BD, rc=%d\n", rc);
rc = pm8xxx_writeb(chip->dev->parent, 0x001, 0xB4);
if (rc)
pr_err("pm8xxx_writeb failed for 0x001, rc=%d\n", rc);
pr_info("dVdd preloaded\n");
return rc;
}
int pm8xxx_preload_dVdd(void)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8901:
rc = __pm8901_preload_dVdd(chip);
break;
default:
break;
}
if (rc) {
pr_err("preload_dVdd failed, rc=%d\n", rc);
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(pm8xxx_preload_dVdd);
int pm8xxx_aux_clk_control(enum pm8xxx_aux_clk_id clk_id,
enum pm8xxx_aux_clk_div divider, bool enable)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
u8 clk_mask = 0, value = 0;
if (clk_id == CLK_MP3_1) {
clk_mask = MP3_1_MASK;
value = divider << MP3_1_SHIFT;
} else if (clk_id == CLK_MP3_2) {
clk_mask = MP3_2_MASK;
value = divider << MP3_2_SHIFT;
} else {
pr_err("Invalid clock id of %d\n", clk_id);
return -EINVAL;
}
if (!enable)
value = 0;
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8038:
case PM8XXX_VERSION_8921:
pm8xxx_misc_masked_write(chip,
REG_PM8XXX_XO_CNTRL_2, clk_mask, value);
break;
default:
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(pm8xxx_aux_clk_control);
int pm8xxx_hsed_bias_control(enum pm8xxx_hsed_bias bias, bool enable)
{
struct pm8xxx_misc_chip *chip;
unsigned long flags;
int rc = 0;
u16 addr;
switch (bias) {
case PM8XXX_HSED_BIAS0:
addr = REG_HSED_BIAS0_CNTL2;
break;
case PM8XXX_HSED_BIAS1:
addr = REG_HSED_BIAS1_CNTL2;
break;
case PM8XXX_HSED_BIAS2:
addr = REG_HSED_BIAS2_CNTL2;
break;
default:
pr_err("Invalid BIAS line\n");
return -EINVAL;
}
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_for_each_entry(chip, &pm8xxx_misc_chips, link) {
switch (chip->version) {
case PM8XXX_VERSION_8058:
case PM8XXX_VERSION_8921:
rc = pm8xxx_misc_masked_write(chip, addr,
HSED_EN_MASK, enable ? HSED_EN_MASK : 0);
if (rc < 0)
pr_err("Enable HSED BIAS failed rc=%d\n", rc);
break;
default:
break;
}
}
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
return rc;
}
EXPORT_SYMBOL(pm8xxx_hsed_bias_control);
static int __devinit pm8xxx_misc_probe(struct platform_device *pdev)
{
const struct pm8xxx_misc_platform_data *pdata = pdev->dev.platform_data;
struct pm8xxx_misc_chip *chip;
struct pm8xxx_misc_chip *sibling;
struct list_head *prev;
unsigned long flags;
int rc = 0, irq;
if (!pdata) {
pr_err("missing platform data\n");
return -EINVAL;
}
chip = kzalloc(sizeof(struct pm8xxx_misc_chip), GFP_KERNEL);
if (!chip) {
pr_err("Cannot allocate %d bytes\n",
sizeof(struct pm8xxx_misc_chip));
return -ENOMEM;
}
chip->dev = &pdev->dev;
chip->version = pm8xxx_get_version(chip->dev->parent);
memcpy(&(chip->pdata), pdata, sizeof(struct pm8xxx_misc_platform_data));
irq = platform_get_irq_byname(pdev, "pm8xxx_osc_halt_irq");
if (irq > 0) {
rc = request_any_context_irq(irq, pm8xxx_osc_halt_isr,
IRQF_TRIGGER_RISING | IRQF_DISABLED,
"pm8xxx_osc_halt_irq", chip);
if (rc < 0) {
pr_err("%s: request_any_context_irq(%d) FAIL: %d\n",
__func__, irq, rc);
goto fail_irq;
}
}
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
prev = &pm8xxx_misc_chips;
list_for_each_entry(sibling, &pm8xxx_misc_chips, link) {
if (chip->pdata.priority < sibling->pdata.priority)
break;
else
prev = &sibling->link;
}
list_add(&chip->link, prev);
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
platform_set_drvdata(pdev, chip);
return rc;
fail_irq:
platform_set_drvdata(pdev, NULL);
kfree(chip);
return rc;
}
static int __devexit pm8xxx_misc_remove(struct platform_device *pdev)
{
struct pm8xxx_misc_chip *chip = platform_get_drvdata(pdev);
unsigned long flags;
int irq = platform_get_irq_byname(pdev, "pm8xxx_osc_halt_irq");
if (irq > 0)
free_irq(irq, chip);
spin_lock_irqsave(&pm8xxx_misc_chips_lock, flags);
list_del(&chip->link);
spin_unlock_irqrestore(&pm8xxx_misc_chips_lock, flags);
platform_set_drvdata(pdev, NULL);
kfree(chip);
return 0;
}
static struct platform_driver pm8xxx_misc_driver = {
.probe = pm8xxx_misc_probe,
.remove = __devexit_p(pm8xxx_misc_remove),
.driver = {
.name = PM8XXX_MISC_DEV_NAME,
.owner = THIS_MODULE,
},
};
static int __init pm8xxx_misc_init(void)
{
return platform_driver_register(&pm8xxx_misc_driver);
}
postcore_initcall(pm8xxx_misc_init);
static void __exit pm8xxx_misc_exit(void)
{
platform_driver_unregister(&pm8xxx_misc_driver);
}
module_exit(pm8xxx_misc_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC 8XXX misc driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:" PM8XXX_MISC_DEV_NAME);
| gpl-2.0 |
imoseyon/leanKernel-i500-gingerbread | arch/m68k/kernel/time.c | 844 | 2711 | /*
* linux/arch/m68k/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*
* This file contains the m68k-specific time handling details.
* Most of the stuff is located in the machine specific files.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/profile.h>
static inline int set_rtc_mmss(unsigned long nowtime)
{
if (mach_set_clock_mmss)
return mach_set_clock_mmss (nowtime);
return -1;
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
profile_tick(CPU_PROFILING);
#ifdef CONFIG_HEARTBEAT
/* use power LED as a heartbeat instead -- much more useful
for debugging -- based on the version for PReP by Cort */
/* acts like an actual heart beat -- ie thump-thump-pause... */
if (mach_heartbeat) {
static unsigned cnt = 0, period = 0, dist = 0;
if (cnt == 0 || cnt == dist)
mach_heartbeat( 1 );
else if (cnt == 7 || cnt == dist+7)
mach_heartbeat( 0 );
if (++cnt > period) {
cnt = 0;
/* The hyperbolic function below modifies the heartbeat period
* length in dependency of the current (5min) load. It goes
* through the points f(0)=126, f(1)=86, f(5)=51,
* f(inf)->30. */
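/* Editor's note (a hedged sanity check, not from the original source):
* assuming avenrun[] carries FSHIFT fractional bits as elsewhere in the
* kernel, the expression below gives 672/7 + 30 = 126 at load 0,
* 672/12 + 30 = 86 at load 1, 672/32 + 30 = 51 at load 5, and tends to
* 30 as the load grows, matching the points quoted above. */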
period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
dist = period / 4;
}
}
#endif /* CONFIG_HEARTBEAT */
return IRQ_HANDLED;
}
void read_persistent_clock(struct timespec *ts)
{
struct rtc_time time;
ts->tv_sec = 0;
ts->tv_nsec = 0;
if (mach_hwclk) {
mach_hwclk(0, &time);
if ((time.tm_year += 1900) < 1970)
time.tm_year += 100;
ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec);
}
}
void __init time_init(void)
{
mach_sched_init(timer_interrupt);
}
u32 arch_gettimeoffset(void)
{
return mach_gettimeoffset() * 1000;
}
static int __init rtc_init(void)
{
struct platform_device *pdev;
if (!mach_hwclk)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return 0;
}
module_init(rtc_init);
| gpl-2.0 |
vidoardes/Vivo-2.6.35 | drivers/input/joystick/gf2k.c | 844 | 9946 | /*
* Copyright (c) 1998-2001 Vojtech Pavlik
*/
/*
* Genius Flight 2000 joystick driver for Linux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/gameport.h>
#include <linux/jiffies.h>
#define DRIVER_DESC "Genius Flight 2000 joystick driver"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#define GF2K_START 400 /* The time we wait for the first bit [400 us] */
#define GF2K_STROBE 40 /* The time we wait for the first bit [40 us] */
#define GF2K_TIMEOUT 4 /* Wait for everything to settle [4 ms] */
#define GF2K_LENGTH 80 /* Max number of triplets in a packet */
/*
* Genius joystick ids ...
*/
#define GF2K_ID_G09 1
#define GF2K_ID_F30D 2
#define GF2K_ID_F30 3
#define GF2K_ID_F31D 4
#define GF2K_ID_F305 5
#define GF2K_ID_F23P 6
#define GF2K_ID_F31 7
#define GF2K_ID_MAX 7
static char gf2k_length[] = { 40, 40, 40, 40, 40, 40, 40, 40 };
static char gf2k_hat_to_axis[][2] = {{ 0, 0}, { 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}};
static char *gf2k_names[] = {"", "Genius G-09D", "Genius F-30D", "Genius F-30", "Genius MaxFighter F-31D",
"Genius F-30-5", "Genius Flight2000 F-23", "Genius F-31"};
static unsigned char gf2k_hats[] = { 0, 2, 0, 0, 2, 0, 2, 0 };
static unsigned char gf2k_axes[] = { 0, 2, 0, 0, 4, 0, 4, 0 };
static unsigned char gf2k_joys[] = { 0, 0, 0, 0,10, 0, 8, 0 };
static unsigned char gf2k_pads[] = { 0, 6, 0, 0, 0, 0, 0, 0 };
static unsigned char gf2k_lens[] = { 0,18, 0, 0,18, 0,18, 0 };
static unsigned char gf2k_abs[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_GAS, ABS_BRAKE };
static short gf2k_btn_joy[] = { BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4 };
static short gf2k_btn_pad[] = { BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_TL, BTN_TR, BTN_TL2, BTN_TR2, BTN_START, BTN_SELECT };
static short gf2k_seq_reset[] = { 240, 340, 0 };
static short gf2k_seq_digital[] = { 590, 320, 860, 0 };
struct gf2k {
struct gameport *gameport;
struct input_dev *dev;
int reads;
int bads;
unsigned char id;
unsigned char length;
char phys[32];
};
/*
* gf2k_read_packet() reads a Genius Flight2000 packet.
*/
static int gf2k_read_packet(struct gameport *gameport, int length, char *data)
{
unsigned char u, v;
int i;
unsigned int t, p;
unsigned long flags;
t = gameport_time(gameport, GF2K_START);
p = gameport_time(gameport, GF2K_STROBE);
i = 0;
local_irq_save(flags);
gameport_trigger(gameport);
v = gameport_read(gameport);
while (t > 0 && i < length) {
t--; u = v;
v = gameport_read(gameport);
if (v & ~u & 0x10) {
data[i++] = v >> 5;
t = p;
}
}
local_irq_restore(flags);
return i;
}
/*
* gf2k_trigger_seq() initializes a Genius Flight2000 joystick
* into digital mode.
*/
static void gf2k_trigger_seq(struct gameport *gameport, short *seq)
{
unsigned long flags;
int i, t;
local_irq_save(flags);
i = 0;
do {
gameport_trigger(gameport);
t = gameport_time(gameport, GF2K_TIMEOUT * 1000);
while ((gameport_read(gameport) & 1) && t) t--;
udelay(seq[i]);
} while (seq[++i]);
gameport_trigger(gameport);
local_irq_restore(flags);
}
/*
* js_sw_get_bits() composes bits from the triplet buffer into a __u64.
* Parameter 'pos' is bit number inside packet where to start at, 'num' is number
* of bits to be read, 'shift' is offset in the resulting __u64 to start at, bits
* is number of bits per triplet.
*/
#define GB(p,n,s) gf2k_get_bits(data, p, n, s)
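/* Editor's note (illustrative example, not part of the original source):
* with 3-bit triplets, GB(40,4,0) -- as used for the hat switch in
* gf2k_read() below -- returns the 4 bits starting at packet bit 40,
* placed at bit offset 0 of the result. */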
static int gf2k_get_bits(unsigned char *buf, int pos, int num, int shift)
{
__u64 data = 0;
int i;
for (i = 0; i < num / 3 + 2; i++)
data |= buf[pos / 3 + i] << (i * 3);
data >>= pos % 3;
data &= (1 << num) - 1;
data <<= shift;
return data;
}
static void gf2k_read(struct gf2k *gf2k, unsigned char *data)
{
struct input_dev *dev = gf2k->dev;
int i, t;
for (i = 0; i < 4 && i < gf2k_axes[gf2k->id]; i++)
input_report_abs(dev, gf2k_abs[i], GB(i<<3,8,0) | GB(i+46,1,8) | GB(i+50,1,9));
for (i = 0; i < 2 && i < gf2k_axes[gf2k->id] - 4; i++)
input_report_abs(dev, gf2k_abs[i], GB(i*9+60,8,0) | GB(i+54,1,9));
t = GB(40,4,0);
for (i = 0; i < gf2k_hats[gf2k->id]; i++)
input_report_abs(dev, ABS_HAT0X + i, gf2k_hat_to_axis[t][i]);
t = GB(44,2,0) | GB(32,8,2) | GB(78,2,10);
for (i = 0; i < gf2k_joys[gf2k->id]; i++)
input_report_key(dev, gf2k_btn_joy[i], (t >> i) & 1);
for (i = 0; i < gf2k_pads[gf2k->id]; i++)
input_report_key(dev, gf2k_btn_pad[i], (t >> i) & 1);
input_sync(dev);
}
/*
* gf2k_poll() reads and analyzes Genius joystick data.
*/
static void gf2k_poll(struct gameport *gameport)
{
struct gf2k *gf2k = gameport_get_drvdata(gameport);
unsigned char data[GF2K_LENGTH];
gf2k->reads++;
if (gf2k_read_packet(gf2k->gameport, gf2k_length[gf2k->id], data) < gf2k_length[gf2k->id])
gf2k->bads++;
else
gf2k_read(gf2k, data);
}
static int gf2k_open(struct input_dev *dev)
{
struct gf2k *gf2k = input_get_drvdata(dev);
gameport_start_polling(gf2k->gameport);
return 0;
}
static void gf2k_close(struct input_dev *dev)
{
struct gf2k *gf2k = input_get_drvdata(dev);
gameport_stop_polling(gf2k->gameport);
}
/*
* gf2k_connect() probes for Genius id joysticks.
*/
static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
{
struct gf2k *gf2k;
struct input_dev *input_dev;
unsigned char data[GF2K_LENGTH];
int i, err;
gf2k = kzalloc(sizeof(struct gf2k), GFP_KERNEL);
input_dev = input_allocate_device();
if (!gf2k || !input_dev) {
err = -ENOMEM;
goto fail1;
}
gf2k->gameport = gameport;
gf2k->dev = input_dev;
gameport_set_drvdata(gameport, gf2k);
err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW);
if (err)
goto fail1;
gf2k_trigger_seq(gameport, gf2k_seq_reset);
msleep(GF2K_TIMEOUT);
gf2k_trigger_seq(gameport, gf2k_seq_digital);
msleep(GF2K_TIMEOUT);
if (gf2k_read_packet(gameport, GF2K_LENGTH, data) < 12) {
err = -ENODEV;
goto fail2;
}
if (!(gf2k->id = GB(7,2,0) | GB(3,3,2) | GB(0,3,5))) {
err = -ENODEV;
goto fail2;
}
#ifdef RESET_WORKS
if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) &&
(gf2k->id != (GB(31,2,0) | GB(27,3,2) | GB(24,3,5)))) {
err = -ENODEV;
goto fail2;
}
#else
gf2k->id = 6;
#endif
if (gf2k->id > GF2K_ID_MAX || !gf2k_axes[gf2k->id]) {
printk(KERN_WARNING "gf2k.c: Not yet supported joystick on %s. [id: %d type:%s]\n",
gameport->phys, gf2k->id, gf2k->id > GF2K_ID_MAX ? "Unknown" : gf2k_names[gf2k->id]);
err = -ENODEV;
goto fail2;
}
gameport_set_poll_handler(gameport, gf2k_poll);
gameport_set_poll_interval(gameport, 20);
snprintf(gf2k->phys, sizeof(gf2k->phys), "%s/input0", gameport->phys);
gf2k->length = gf2k_lens[gf2k->id];
input_dev->name = gf2k_names[gf2k->id];
input_dev->phys = gf2k->phys;
input_dev->id.bustype = BUS_GAMEPORT;
input_dev->id.vendor = GAMEPORT_ID_VENDOR_GENIUS;
input_dev->id.product = gf2k->id;
input_dev->id.version = 0x0100;
input_dev->dev.parent = &gameport->dev;
input_set_drvdata(input_dev, gf2k);
input_dev->open = gf2k_open;
input_dev->close = gf2k_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
for (i = 0; i < gf2k_axes[gf2k->id]; i++)
set_bit(gf2k_abs[i], input_dev->absbit);
for (i = 0; i < gf2k_hats[gf2k->id]; i++) {
set_bit(ABS_HAT0X + i, input_dev->absbit);
input_dev->absmin[ABS_HAT0X + i] = -1;
input_dev->absmax[ABS_HAT0X + i] = 1;
}
for (i = 0; i < gf2k_joys[gf2k->id]; i++)
set_bit(gf2k_btn_joy[i], input_dev->keybit);
for (i = 0; i < gf2k_pads[gf2k->id]; i++)
set_bit(gf2k_btn_pad[i], input_dev->keybit);
gf2k_read_packet(gameport, gf2k->length, data);
gf2k_read(gf2k, data);
for (i = 0; i < gf2k_axes[gf2k->id]; i++) {
input_dev->absmax[gf2k_abs[i]] = (i < 2) ? input_dev->abs[gf2k_abs[i]] * 2 - 32 :
input_dev->abs[gf2k_abs[0]] + input_dev->abs[gf2k_abs[1]] - 32;
input_dev->absmin[gf2k_abs[i]] = 32;
input_dev->absfuzz[gf2k_abs[i]] = 8;
input_dev->absflat[gf2k_abs[i]] = (i < 2) ? 24 : 0;
}
err = input_register_device(gf2k->dev);
if (err)
goto fail2;
return 0;
fail2: gameport_close(gameport);
fail1: gameport_set_drvdata(gameport, NULL);
input_free_device(input_dev);
kfree(gf2k);
return err;
}
static void gf2k_disconnect(struct gameport *gameport)
{
struct gf2k *gf2k = gameport_get_drvdata(gameport);
input_unregister_device(gf2k->dev);
gameport_close(gameport);
gameport_set_drvdata(gameport, NULL);
kfree(gf2k);
}
static struct gameport_driver gf2k_drv = {
.driver = {
.name = "gf2k",
},
.description = DRIVER_DESC,
.connect = gf2k_connect,
.disconnect = gf2k_disconnect,
};
static int __init gf2k_init(void)
{
return gameport_register_driver(&gf2k_drv);
}
static void __exit gf2k_exit(void)
{
gameport_unregister_driver(&gf2k_drv);
}
module_init(gf2k_init);
module_exit(gf2k_exit);
| gpl-2.0 |
giangnguyennet/linux | arch/nios2/platform/platform.c | 1100 | 1152 | /*
* Copyright (C) 2013 Altera Corporation
* Copyright (C) 2011 Thomas Chou
* Copyright (C) 2011 Walter Goossens
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/io.h>
static int __init nios2_soc_device_init(void)
{
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
const char *machine;
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (soc_dev_attr) {
machine = of_flat_dt_get_machine_name();
if (machine)
soc_dev_attr->machine = kasprintf(GFP_KERNEL, "%s",
machine);
soc_dev_attr->family = "Nios II";
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->machine);
kfree(soc_dev_attr);
}
}
return of_platform_populate(NULL, of_default_bus_match_table,
NULL, NULL);
}
device_initcall(nios2_soc_device_init);
| gpl-2.0 |
fireshots/linux | drivers/scsi/aacraid/sa.c | 1356 | 10302 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* sa.c
*
* Abstract: Drawbridge specific support functions
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
static irqreturn_t aac_sa_intr(int irq, void *dev_id)
{
struct aac_dev *dev = dev_id;
unsigned short intstat, mask;
intstat = sa_readw(dev, DoorbellReg_p);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have been enabled.
*/
mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
/* Check to see if this is our interrupt. If it isn't just return */
if (intstat & mask) {
if (intstat & PrintfReady) {
aac_printf(dev, sa_readl(dev, Mailbox5));
sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
sa_writew(dev, DoorbellReg_s, PrintfDone);
} else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready
sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
} else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready
sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
} else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full
sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
} else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full
sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/**
* aac_sa_disable_interrupt - disable interrupt
* @dev: Which adapter to enable.
*/
static void aac_sa_disable_interrupt (struct aac_dev *dev)
{
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
}
/**
* aac_sa_enable_interrupt - enable interrupt
* @dev: Which adapter to enable.
*/
static void aac_sa_enable_interrupt (struct aac_dev *dev)
{
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
}
/**
* aac_sa_notify_adapter - handle adapter notification
* @dev: Adapter that notification is for
* @event: Event to notify
*
* Notify the adapter of an event
*/
static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
{
switch (event) {
case AdapNormCmdQue:
sa_writew(dev, DoorbellReg_s,DOORBELL_1);
break;
case HostNormRespNotFull:
sa_writew(dev, DoorbellReg_s,DOORBELL_4);
break;
case AdapNormRespQue:
sa_writew(dev, DoorbellReg_s,DOORBELL_2);
break;
case HostNormCmdNotFull:
sa_writew(dev, DoorbellReg_s,DOORBELL_3);
break;
case HostShutdown:
/*
sa_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
*/
break;
case FastIo:
sa_writew(dev, DoorbellReg_s,DOORBELL_6);
break;
case AdapPrintfDone:
sa_writew(dev, DoorbellReg_s,DOORBELL_5);
break;
default:
BUG();
break;
}
}
/**
* sa_sync_cmd - send a command and wait
* @dev: Adapter
* @command: Command to execute
* @p1: first parameter
* @ret: adapter status
*
* This routine will send a synchronous command to the adapter and wait
* for its completion.
*/
static int sa_sync_cmd(struct aac_dev *dev, u32 command,
u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
u32 *ret, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
unsigned long start;
int ok;
/*
* Write the Command into Mailbox 0
*/
sa_writel(dev, Mailbox0, command);
/*
* Write the parameters into Mailboxes 1 - 4
*/
sa_writel(dev, Mailbox1, p1);
sa_writel(dev, Mailbox2, p2);
sa_writel(dev, Mailbox3, p3);
sa_writel(dev, Mailbox4, p4);
/*
* Clear the synch command doorbell to start on a clean slate.
*/
sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
/*
* Signal that there is a new synch command
*/
sa_writew(dev, DoorbellReg_s, DOORBELL_0);
ok = 0;
start = jiffies;
while(time_before(jiffies, start+30*HZ))
{
/*
* Delay 5uS so that the monitor gets access
*/
udelay(5);
/*
* Mon110 will set doorbell0 bit when it has
* completed the command.
*/
if(sa_readw(dev, DoorbellReg_p) & DOORBELL_0) {
ok = 1;
break;
}
msleep(1);
}
if (ok != 1)
return -ETIMEDOUT;
/*
* Clear the synch command doorbell.
*/
sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
/*
* Pull the synch status from Mailbox 0.
*/
if (ret)
*ret = sa_readl(dev, Mailbox0);
if (r1)
*r1 = sa_readl(dev, Mailbox1);
if (r2)
*r2 = sa_readl(dev, Mailbox2);
if (r3)
*r3 = sa_readl(dev, Mailbox3);
if (r4)
*r4 = sa_readl(dev, Mailbox4);
return 0;
}
/**
* aac_sa_interrupt_adapter - interrupt an adapter
* @dev: Which adapter to enable.
*
* Breakpoint an adapter.
*/
static void aac_sa_interrupt_adapter (struct aac_dev *dev)
{
sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
/**
* aac_sa_start_adapter - activate adapter
* @dev: Adapter
*
* Start up processing on an ARM based AAC adapter
*/
static void aac_sa_start_adapter(struct aac_dev *dev)
{
struct aac_init *init;
/*
* Fill in the remaining pieces of the init.
*/
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
static int aac_sa_restart_adapter(struct aac_dev *dev, int bled)
{
return -EINVAL;
}
/**
* aac_sa_check_health
* @dev: device to check if healthy
*
* Will attempt to determine if the specified adapter is alive and
* capable of handling requests, returning 0 if alive.
*/
static int aac_sa_check_health(struct aac_dev *dev)
{
long status = sa_readl(dev, Mailbox7);
/*
* Check to see if the board failed any self tests.
*/
if (status & SELF_TEST_FAILED)
return -1;
/*
* Check to see if the board panic'd while booting.
*/
if (status & KERNEL_PANIC)
return -2;
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes
*/
if (!(status & KERNEL_UP_AND_RUNNING))
return -3;
/*
* Everything is OK
*/
return 0;
}
/**
* aac_sa_ioremap
* @size: mapping resize request
*
*/
static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
{
if (!size) {
iounmap(dev->regs.sa);
return 0;
}
dev->base = dev->regs.sa = ioremap(dev->base_start, size);
return (dev->base == NULL) ? -1 : 0;
}
/**
* aac_sa_init - initialize an ARM based AAC card
* @dev: device to configure
*
* Allocate and set up resources for the ARM based AAC variants. The
* device_interface in the commregion will be allocated and linked
* to the comm region.
*/
int aac_sa_init(struct aac_dev *dev)
{
unsigned long start;
unsigned long status;
int instance;
const char *name;
instance = dev->id;
name = dev->name;
if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
}
/*
* Check to see if the board failed any self tests.
*/
if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
goto error_iounmap;
}
/*
* Check to see if the board panic'd while booting.
*/
if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
goto error_iounmap;
}
start = jiffies;
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes.
*/
while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
if (time_after(jiffies, start+startup_timeout*HZ)) {
status = sa_readl(dev, Mailbox7);
printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n",
name, instance, status);
goto error_iounmap;
}
msleep(1);
}
/*
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health;
dev->a_ops.adapter_restart = aac_sa_restart_adapter;
dev->a_ops.adapter_intr = aac_sa_intr;
dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
dev->a_ops.adapter_ioremap = aac_sa_ioremap;
/*
* First clear out all interrupts. Then enable the one's that
* we can handle.
*/
aac_adapter_disable_int(dev);
aac_adapter_enable_int(dev);
if(aac_init_adapter(dev) == NULL)
goto error_irq;
dev->sync_mode = 0; /* sync. mode not supported */
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED, "aacraid", (void *)dev) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
}
dev->dbg_base = dev->base_start;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
aac_adapter_enable_int(dev);
/*
* Tell the adapter that all is configure, and it can start
* accepting requests
*/
aac_sa_start_adapter(dev);
return 0;
error_irq:
aac_sa_disable_interrupt(dev);
free_irq(dev->pdev->irq, (void *)dev);
error_iounmap:
return -1;
}
| gpl-2.0 |
maxwen/primou-kernel-KISS | drivers/scsi/bfa/bfad_debugfs.c | 2380 | 13896 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/debugfs.h>
#include "bfad_drv.h"
#include "bfad_im.h"
/*
* BFA debugfs interface
*
* To access the interface, debugfs file system should be mounted
* if not already mounted using:
* mount -t debugfs none /sys/kernel/debug
*
* BFA Hierarchy:
* - bfa/pci_dev:<pci_name>
* where the pci_name corresponds to the one under /sys/bus/pci/drivers/bfa
*
* Debugging service available per pci_dev:
* fwtrc: To collect current firmware trace.
* drvtrc: To collect current driver trace
* fwsave: To collect last saved fw trace as a result of firmware crash.
* regwr: To write one word to chip register
* regrd: To read one or more words from chip register.
*/
struct bfad_debug_info {
char *debug_buffer;
void *i_private;
int buffer_len;
};
static int
bfad_debugfs_open_drvtrc(struct inode *inode, struct file *file)
{
struct bfad_port_s *port = inode->i_private;
struct bfad_s *bfad = port->bfad;
struct bfad_debug_info *debug;
debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
if (!debug)
return -ENOMEM;
debug->debug_buffer = (void *) bfad->trcmod;
debug->buffer_len = sizeof(struct bfa_trc_mod_s);
file->private_data = debug;
return 0;
}
static int
bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
{
struct bfad_port_s *port = inode->i_private;
struct bfad_s *bfad = port->bfad;
struct bfad_debug_info *fw_debug;
unsigned long flags;
int rc;
fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
if (!fw_debug)
return -ENOMEM;
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
bfad->inst_no);
return -ENOMEM;
}
memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
fw_debug->debug_buffer,
&fw_debug->buffer_len);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc != BFA_STATUS_OK) {
vfree(fw_debug->debug_buffer);
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to collect fwtrc\n",
bfad->inst_no);
return -ENOMEM;
}
file->private_data = fw_debug;
return 0;
}
static int
bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
{
struct bfad_port_s *port = inode->i_private;
struct bfad_s *bfad = port->bfad;
struct bfad_debug_info *fw_debug;
unsigned long flags;
int rc;
fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
if (!fw_debug)
return -ENOMEM;
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
bfad->inst_no);
return -ENOMEM;
}
memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
fw_debug->debug_buffer,
&fw_debug->buffer_len);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc != BFA_STATUS_OK) {
vfree(fw_debug->debug_buffer);
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to collect fwsave\n",
bfad->inst_no);
return -ENOMEM;
}
file->private_data = fw_debug;
return 0;
}
static int
bfad_debugfs_open_reg(struct inode *inode, struct file *file)
{
struct bfad_debug_info *reg_debug;
reg_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
if (!reg_debug)
return -ENOMEM;
reg_debug->i_private = inode->i_private;
file->private_data = reg_debug;
return 0;
}
/* Changes the current file position */
static loff_t
bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
{
struct bfad_debug_info *debug;
loff_t pos = file->f_pos;
debug = file->private_data;
switch (orig) {
case 0:
file->f_pos = offset;
break;
case 1:
file->f_pos += offset;
break;
case 2:
file->f_pos = debug->buffer_len - offset;
break;
default:
return -EINVAL;
}
if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
file->f_pos = pos;
return -EINVAL;
}
return file->f_pos;
}
static ssize_t
bfad_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *pos)
{
struct bfad_debug_info *debug = file->private_data;
if (!debug || !debug->debug_buffer)
return 0;
return simple_read_from_buffer(buf, nbytes, pos,
debug->debug_buffer, debug->buffer_len);
}
#define BFA_REG_CT_ADDRSZ (0x40000)
#define BFA_REG_CB_ADDRSZ (0x20000)
#define BFA_REG_ADDRSZ(__bfa) \
((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \
BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
static bfa_status_t
bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
{
u8 area;
/* check [17:15] to select the register area */
area = (offset >> 15) & 0x7;
if (area == 0) {
/* PCIe core register */
if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
return BFA_STATUS_EINVAL;
} else if (area == 0x1) {
/* CB 32 KB memory page */
if ((offset + (len<<2)) > 0x10000) /* 16k dwords or 64KB */
return BFA_STATUS_EINVAL;
} else {
/* CB register space 64KB */
if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa))
return BFA_STATUS_EINVAL;
}
return BFA_STATUS_OK;
}
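/*
* Return the register dump cached by the last "regrd" write; once the
* reader has consumed it all, drop bfad->regdata so the next write can
* capture a fresh dump.
*/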
static ssize_t
bfad_debugfs_read_regrd(struct file *file, char __user *buf,
size_t nbytes, loff_t *pos)
{
struct bfad_debug_info *regrd_debug = file->private_data;
struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
struct bfad_s *bfad = port->bfad;
ssize_t rc;
if (!bfad->regdata)
return 0;
rc = simple_read_from_buffer(buf, nbytes, pos,
bfad->regdata, bfad->reglen);
if ((*pos + nbytes) >= bfad->reglen) {
kfree(bfad->regdata);
bfad->regdata = NULL;
bfad->reglen = 0;
}
return rc;
}
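/*
* "regrd" write handler: the user buffer is parsed as "<offset>:<count>"
* in hex, then <count> 32-bit words are read from BAR0 starting at
* <offset> and cached in bfad->regdata for the following read.
*/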
static ssize_t
bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct bfad_debug_info *regrd_debug = file->private_data;
struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
struct bfad_s *bfad = port->bfad;
struct bfa_s *bfa = &bfad->bfa;
struct bfa_ioc_s *ioc = &bfa->ioc;
int addr, len, rc, i;
u32 *regbuf;
void __iomem *rb, *reg_addr;
unsigned long flags;
void *kern_buf;
kern_buf = kzalloc(nbytes, GFP_KERNEL);
if (!kern_buf) {
printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
bfad->inst_no);
return -ENOMEM;
}
if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
kfree(kern_buf);
return -EFAULT;
}
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
if (rc < 2) {
printk(KERN_INFO
"bfad[%d]: %s failed to read user buf\n",
bfad->inst_no, __func__);
kfree(kern_buf);
return -EINVAL;
}
kfree(kern_buf);
kfree(bfad->regdata);
bfad->regdata = NULL;
bfad->reglen = 0;
bfad->regdata = kzalloc(len << 2, GFP_KERNEL);
if (!bfad->regdata) {
printk(KERN_INFO "bfad[%d]: Failed to allocate regrd buffer\n",
bfad->inst_no);
return -ENOMEM;
}
bfad->reglen = len << 2;
rb = bfa_ioc_bar0(ioc);
addr &= BFA_REG_ADDRMSK(bfa);
/* offset and len sanity check */
rc = bfad_reg_offset_check(bfa, addr, len);
if (rc) {
printk(KERN_INFO "bfad[%d]: Failed reg offset check\n",
bfad->inst_no);
kfree(bfad->regdata);
bfad->regdata = NULL;
bfad->reglen = 0;
return -EINVAL;
}
reg_addr = rb + addr;
regbuf = (u32 *)bfad->regdata;
spin_lock_irqsave(&bfad->bfad_lock, flags);
for (i = 0; i < len; i++) {
*regbuf = readl(reg_addr);
regbuf++;
reg_addr += sizeof(u32);
}
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return nbytes;
}
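/*
* "regwr" write handler: the user buffer is parsed as "<offset>:<value>"
* in hex and the 32-bit value is written to BAR0 at <offset> after the
* offset passes the same range check as register reads.
*/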
static ssize_t
bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct bfad_debug_info *debug = file->private_data;
struct bfad_port_s *port = (struct bfad_port_s *)debug->i_private;
struct bfad_s *bfad = port->bfad;
struct bfa_s *bfa = &bfad->bfa;
struct bfa_ioc_s *ioc = &bfa->ioc;
int addr, val, rc;
void __iomem *reg_addr;
unsigned long flags;
void *kern_buf;
kern_buf = kzalloc(nbytes, GFP_KERNEL);
if (!kern_buf) {
printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
bfad->inst_no);
return -ENOMEM;
}
if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
kfree(kern_buf);
return -EFAULT;
}
rc = sscanf(kern_buf, "%x:%x", &addr, &val);
if (rc < 2) {
printk(KERN_INFO
"bfad[%d]: %s failed to read user buf\n",
bfad->inst_no, __func__);
kfree(kern_buf);
return -EINVAL;
}
kfree(kern_buf);
addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
/* offset and len sanity check */
rc = bfad_reg_offset_check(bfa, addr, 1);
if (rc) {
printk(KERN_INFO
"bfad[%d]: Failed reg offset check\n",
bfad->inst_no);
return -EINVAL;
}
reg_addr = (bfa_ioc_bar0(ioc)) + addr;
spin_lock_irqsave(&bfad->bfad_lock, flags);
writel(val, reg_addr);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return nbytes;
}
static int
bfad_debugfs_release(struct inode *inode, struct file *file)
{
struct bfad_debug_info *debug = file->private_data;
if (!debug)
return 0;
file->private_data = NULL;
kfree(debug);
return 0;
}
static int
bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
{
struct bfad_debug_info *fw_debug = file->private_data;
if (!fw_debug)
return 0;
if (fw_debug->debug_buffer)
vfree(fw_debug->debug_buffer);
file->private_data = NULL;
kfree(fw_debug);
return 0;
}
static const struct file_operations bfad_debugfs_op_drvtrc = {
.owner = THIS_MODULE,
.open = bfad_debugfs_open_drvtrc,
.llseek = bfad_debugfs_lseek,
.read = bfad_debugfs_read,
.release = bfad_debugfs_release,
};
static const struct file_operations bfad_debugfs_op_fwtrc = {
.owner = THIS_MODULE,
.open = bfad_debugfs_open_fwtrc,
.llseek = bfad_debugfs_lseek,
.read = bfad_debugfs_read,
.release = bfad_debugfs_release_fwtrc,
};
static const struct file_operations bfad_debugfs_op_fwsave = {
.owner = THIS_MODULE,
.open = bfad_debugfs_open_fwsave,
.llseek = bfad_debugfs_lseek,
.read = bfad_debugfs_read,
.release = bfad_debugfs_release_fwtrc,
};
static const struct file_operations bfad_debugfs_op_regrd = {
.owner = THIS_MODULE,
.open = bfad_debugfs_open_reg,
.llseek = bfad_debugfs_lseek,
.read = bfad_debugfs_read_regrd,
.write = bfad_debugfs_write_regrd,
.release = bfad_debugfs_release,
};
static const struct file_operations bfad_debugfs_op_regwr = {
.owner = THIS_MODULE,
.open = bfad_debugfs_open_reg,
.llseek = bfad_debugfs_lseek,
.write = bfad_debugfs_write_regwr,
.release = bfad_debugfs_release,
};
struct bfad_debugfs_entry {
const char *name;
mode_t mode;
const struct file_operations *fops;
};
static const struct bfad_debugfs_entry bfad_debugfs_files[] = {
{ "drvtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_drvtrc, },
{ "fwtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwtrc, },
{ "fwsave", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwsave, },
{ "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bfad_debugfs_op_regrd, },
{ "regwr", S_IFREG|S_IWUSR, &bfad_debugfs_op_regwr, },
};
static struct dentry *bfa_debugfs_root;
static atomic_t bfa_debugfs_port_count;
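/*
* Per-port debugfs layout created below:
* <debugfs>/bfa/pci_dev:<pci_name>/{drvtrc,fwtrc,fwsave,regrd,regwr}
* The shared "bfa" root directory is reference counted through
* bfa_debugfs_port_count and removed when the last port goes away.
*/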
inline void
bfad_debugfs_init(struct bfad_port_s *port)
{
struct bfad_s *bfad = port->bfad;
const struct bfad_debugfs_entry *file;
char name[64];
int i;
if (!bfa_debugfs_enable)
return;
/* Setup the BFA debugfs root directory*/
if (!bfa_debugfs_root) {
bfa_debugfs_root = debugfs_create_dir("bfa", NULL);
atomic_set(&bfa_debugfs_port_count, 0);
if (!bfa_debugfs_root) {
printk(KERN_WARNING
"BFA debugfs root dir creation failed\n");
goto err;
}
}
/* Setup the pci_dev debugfs directory for the port */
snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name);
if (!port->port_debugfs_root) {
port->port_debugfs_root =
debugfs_create_dir(name, bfa_debugfs_root);
if (!port->port_debugfs_root) {
printk(KERN_WARNING
"bfa %s: debugfs root creation failed\n",
bfad->pci_name);
goto err;
}
atomic_inc(&bfa_debugfs_port_count);
for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
file = &bfad_debugfs_files[i];
bfad->bfad_dentry_files[i] =
debugfs_create_file(file->name,
file->mode,
port->port_debugfs_root,
port,
file->fops);
if (!bfad->bfad_dentry_files[i]) {
printk(KERN_WARNING
"bfa %s: debugfs %s creation failed\n",
bfad->pci_name, file->name);
goto err;
}
}
}
err:
return;
}
inline void
bfad_debugfs_exit(struct bfad_port_s *port)
{
struct bfad_s *bfad = port->bfad;
int i;
for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
if (bfad->bfad_dentry_files[i]) {
debugfs_remove(bfad->bfad_dentry_files[i]);
bfad->bfad_dentry_files[i] = NULL;
}
}
/* Remove the pci_dev debugfs directory for the port */
if (port->port_debugfs_root) {
debugfs_remove(port->port_debugfs_root);
port->port_debugfs_root = NULL;
atomic_dec(&bfa_debugfs_port_count);
}
/* Remove the BFA debugfs root directory */
if (atomic_read(&bfa_debugfs_port_count) == 0) {
debugfs_remove(bfa_debugfs_root);
bfa_debugfs_root = NULL;
}
}
| gpl-2.0 |
avoltz/linux-vybrid | drivers/staging/xgifb/XGI_main_26.c | 2380 | 71956 | /*
* XG20, XG21, XG40, XG42 frame buffer device
* for Linux kernels 2.5.x, 2.6.x
* Based on TW's sis fbdev code.
*/
/* #include <linux/config.h> */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/console.h>
#include <linux/selection.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/vt_kern.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#ifndef XGIFB_PAN
#define XGIFB_PAN
#endif
#include <linux/io.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "XGIfb.h"
#include "vgatypes.h"
#include "XGI_main.h"
#include "vb_init.h"
#include "vb_util.h"
#include "vb_setmode.h"
#define Index_CR_GPIO_Reg1 0x48
#define Index_CR_GPIO_Reg2 0x49
#define Index_CR_GPIO_Reg3 0x4a
#define GPIOG_EN (1<<6)
#define GPIOG_WRITE (1<<6)
#define GPIOG_READ (1<<1)
#define XGIFB_ROM_SIZE 65536
/* -------------------- Macro definitions ---------------------------- */
#undef XGIFBDEBUG
#ifdef XGIFBDEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#else
#define DPRINTK(fmt, args...)
#endif
#ifdef XGIFBDEBUG
static void dumpVGAReg(void)
{
u8 i, reg;
xgifb_reg_set(XGISR, 0x05, 0x86);
/*
xgifb_reg_set(XGISR, 0x08, 0x4f);
xgifb_reg_set(XGISR, 0x0f, 0x20);
xgifb_reg_set(XGISR, 0x11, 0x4f);
xgifb_reg_set(XGISR, 0x13, 0x45);
xgifb_reg_set(XGISR, 0x14, 0x51);
xgifb_reg_set(XGISR, 0x1e, 0x41);
xgifb_reg_set(XGISR, 0x1f, 0x0);
xgifb_reg_set(XGISR, 0x20, 0xa1);
xgifb_reg_set(XGISR, 0x22, 0xfb);
xgifb_reg_set(XGISR, 0x26, 0x22);
xgifb_reg_set(XGISR, 0x3e, 0x07);
*/
/* xgifb_reg_set(XGICR, 0x19, 0x00); */
/* xgifb_reg_set(XGICR, 0x1a, 0x3C); */
/* xgifb_reg_set(XGICR, 0x22, 0xff); */
/* xgifb_reg_set(XGICR, 0x3D, 0x10); */
/* xgifb_reg_set(XGICR, 0x4a, 0xf3); */
/* xgifb_reg_set(XGICR, 0x57, 0x0); */
/* xgifb_reg_set(XGICR, 0x7a, 0x2c); */
/* xgifb_reg_set(XGICR, 0x82, 0xcc); */
/* xgifb_reg_set(XGICR, 0x8c, 0x0); */
/*
xgifb_reg_set(XGICR, 0x99, 0x1);
xgifb_reg_set(XGICR, 0x41, 0x40);
*/
for (i = 0; i < 0x4f; i++) {
reg = xgifb_reg_get(XGISR, i);
printk("\no 3c4 %x", i);
printk("\ni 3c5 => %x", reg);
}
for (i = 0; i < 0xF0; i++) {
reg = xgifb_reg_get(XGICR, i);
printk("\no 3d4 %x", i);
printk("\ni 3d5 => %x", reg);
}
/*
xgifb_reg_set(XGIPART1,0x2F,1);
for (i=1; i < 0x50; i++) {
reg = xgifb_reg_get(XGIPART1, i);
printk("\no d004 %x", i);
printk("\ni d005 => %x", reg);
}
for (i=0; i < 0x50; i++) {
reg = xgifb_reg_get(XGIPART2, i);
printk("\no d010 %x", i);
printk("\ni d011 => %x", reg);
}
for (i=0; i < 0x50; i++) {
reg = xgifb_reg_get(XGIPART3, i);
printk("\no d012 %x",i);
printk("\ni d013 => %x",reg);
}
for (i=0; i < 0x50; i++) {
reg = xgifb_reg_get(XGIPART4, i);
printk("\no d014 %x",i);
printk("\ni d015 => %x",reg);
}
*/
}
#else
static inline void dumpVGAReg(void)
{
}
#endif
/* data for XGI components */
struct video_info xgi_video_info;
#if 1
#define DEBUGPRN(x)
#else
#define DEBUGPRN(x) printk(KERN_INFO x "\n");
#endif
/* --------------- Hardware Access Routines -------------------------- */
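/*
* Look up the pixel clock for a mode number / refresh rate index pair
* from the BIOS VCLK table.
*/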
static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
struct xgi_hw_device_info *HwDeviceExtension,
unsigned char modeno, unsigned char rateindex)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex = 0, ClockIndex = 0;
unsigned short RefreshRateTableIndex = 0;
/* unsigned long temp = 0; */
int Clock;
XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
/*
temp = XGI_SearchModeID(ModeNo , &ModeIdIndex, XGI_Pr) ;
if (!temp) {
printk(KERN_ERR "Could not find mode %x\n", ModeNo);
return 65000;
}
RefreshRateTableIndex = XGI_Pr->EModeIDTable[ModeIdIndex].REFindex;
RefreshRateTableIndex += (rateindex - 1);
*/
ClockIndex = XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
Clock = XGI_Pr->VCLKData[ClockIndex].CLOCK * 1000;
return Clock;
}
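/*
* Reconstruct fbdev timing parameters (margins, sync lengths, sync
* polarity, interlace/doublescan flags) from the BIOS CRT1 register
* table entry that matches the given mode and refresh rate.
*/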
static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
struct xgi_hw_device_info *HwDeviceExtension,
unsigned char modeno, unsigned char rateindex,
u32 *left_margin, u32 *right_margin, u32 *upper_margin,
u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync,
u32 *vmode)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex = 0, index = 0;
unsigned short RefreshRateTableIndex = 0;
unsigned short VRE, VBE, VRS, VBS, VDE, VT;
unsigned short HRE, HBE, HRS, HBS, HDE, HT;
unsigned char sr_data, cr_data, cr_data2;
unsigned long cr_data3;
int A, B, C, D, E, F, temp, j;
XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
/*
temp = XGI_SearchModeID(ModeNo, &ModeIdIndex, XGI_Pr);
if (!temp)
return 0;
RefreshRateTableIndex = XGI_Pr->EModeIDTable[ModeIdIndex].REFindex;
RefreshRateTableIndex += (rateindex - 1);
*/
index = XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
sr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[5];
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[0];
/* Horizontal total */
HT = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x03) << 8);
A = HT + 5;
/*
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[1];
Horizontal display enable end
HDE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x0C) << 6);
*/
HDE = (XGI_Pr->RefIndex[RefreshRateTableIndex].XRes >> 3) - 1;
E = HDE + 1;
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[3];
/* Horizontal retrace (=sync) start */
HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2);
F = HRS - E - 3;
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[1];
/* Horizontal blank start */
HBS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x30) << 4);
sr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[6];
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[2];
cr_data2 = XGI_Pr->XGINEWUB_CRT1Table[index].CR[4];
/* Horizontal blank end */
HBE = (cr_data & 0x1f) | ((unsigned short) (cr_data2 & 0x80) >> 2)
| ((unsigned short) (sr_data & 0x03) << 6);
/* Horizontal retrace (=sync) end */
HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3);
temp = HBE - ((E - 1) & 255);
B = (temp > 0) ? temp : (temp + 256);
temp = HRE - ((E + F + 3) & 63);
C = (temp > 0) ? temp : (temp + 64);
D = B - F - C;
*left_margin = D * 8;
*right_margin = F * 8;
*hsync_len = C * 8;
sr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[14];
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[8];
cr_data2 = XGI_Pr->XGINEWUB_CRT1Table[index].CR[9];
/* Vertical total */
VT = (cr_data & 0xFF) | ((unsigned short) (cr_data2 & 0x01) << 8)
| ((unsigned short) (cr_data2 & 0x20) << 4)
| ((unsigned short) (sr_data & 0x01) << 10);
A = VT + 2;
/* cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[10]; */
/* Vertical display enable end */
/*
VDE = (cr_data & 0xff) |
((unsigned short) (cr_data2 & 0x02) << 7) |
((unsigned short) (cr_data2 & 0x40) << 3) |
((unsigned short) (sr_data & 0x02) << 9);
*/
VDE = XGI_Pr->RefIndex[RefreshRateTableIndex].YRes - 1;
E = VDE + 1;
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[10];
/* Vertical retrace (=sync) start */
VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6)
| ((unsigned short) (cr_data2 & 0x80) << 2)
| ((unsigned short) (sr_data & 0x08) << 7);
F = VRS + 1 - E;
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[12];
cr_data3 = (XGI_Pr->XGINEWUB_CRT1Table[index].CR[14] & 0x80) << 5;
/* Vertical blank start */
VBS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x08) << 5)
| ((unsigned short) (cr_data3 & 0x20) << 4)
| ((unsigned short) (sr_data & 0x04) << 8);
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[13];
/* Vertical blank end */
VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4);
temp = VBE - ((E - 1) & 511);
B = (temp > 0) ? temp : (temp + 512);
cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[11];
/* Vertical retrace (=sync) end */
VRE = (cr_data & 0x0f) | ((sr_data & 0x20) >> 1);
temp = VRE - ((E + F - 1) & 31);
C = (temp > 0) ? temp : (temp + 32);
D = B - F - C;
*upper_margin = D;
*lower_margin = F;
*vsync_len = C;
if (XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x8000)
*sync &= ~FB_SYNC_VERT_HIGH_ACT;
else
*sync |= FB_SYNC_VERT_HIGH_ACT;
if (XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x4000)
*sync &= ~FB_SYNC_HOR_HIGH_ACT;
else
*sync |= FB_SYNC_HOR_HIGH_ACT;
*vmode = FB_VMODE_NONINTERLACED;
if (XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x0080)
*vmode = FB_VMODE_INTERLACED;
else {
j = 0;
while (XGI_Pr->EModeIDTable[j].Ext_ModeID != 0xff) {
if (XGI_Pr->EModeIDTable[j].Ext_ModeID ==
XGI_Pr->RefIndex[RefreshRateTableIndex].ModeID) {
if (XGI_Pr->EModeIDTable[j].Ext_ModeFlag &
DoubleScanMode) {
*vmode = FB_VMODE_DOUBLE;
}
break;
}
j++;
}
}
return 1;
}
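/*
* Fill in the relocated VGA register ports and the bridge Part1..Part5
* ports relative to the card's I/O base address.
*/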
static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
{
XGI_Pr->RelIO = BaseAddr;
XGI_Pr->P3c4 = BaseAddr + 0x14;
XGI_Pr->P3d4 = BaseAddr + 0x24;
XGI_Pr->P3c0 = BaseAddr + 0x10;
XGI_Pr->P3ce = BaseAddr + 0x1e;
XGI_Pr->P3c2 = BaseAddr + 0x12;
XGI_Pr->P3ca = BaseAddr + 0x1a;
XGI_Pr->P3c6 = BaseAddr + 0x16;
XGI_Pr->P3c7 = BaseAddr + 0x17;
XGI_Pr->P3c8 = BaseAddr + 0x18;
XGI_Pr->P3c9 = BaseAddr + 0x19;
XGI_Pr->P3da = BaseAddr + 0x2A;
/* Digital video interface registers (LCD) */
XGI_Pr->Part1Port = BaseAddr + XGI_CRT2_PORT_04;
/* 301 TV Encoder registers */
XGI_Pr->Part2Port = BaseAddr + XGI_CRT2_PORT_10;
/* 301 Macrovision registers */
XGI_Pr->Part3Port = BaseAddr + XGI_CRT2_PORT_12;
/* 301 VGA2 (and LCD) registers */
XGI_Pr->Part4Port = BaseAddr + XGI_CRT2_PORT_14;
/* 301 palette address port registers */
XGI_Pr->Part5Port = BaseAddr + XGI_CRT2_PORT_14 + 2;
}
/* ------------ Interface for init & mode switching code ------------- */
static unsigned char XGIfb_query_VGA_config_space(
struct xgi_hw_device_info *pXGIhw_ext, unsigned long offset,
unsigned long set, unsigned long *value)
{
static struct pci_dev *pdev = NULL;
static unsigned char init = 0, valid_pdev = 0;
if (!set)
DPRINTK("XGIfb: Get VGA offset 0x%lx\n", offset);
else
DPRINTK("XGIfb: Set offset 0x%lx to 0x%lx\n", offset, *value);
if (!init) {
init = 1;
pdev = pci_get_device(PCI_VENDOR_ID_XG, xgi_video_info.chip_id,
pdev);
if (pdev) {
valid_pdev = 1;
pci_dev_put(pdev);
}
}
if (!valid_pdev) {
printk(KERN_DEBUG "XGIfb: Can't find XGI %d VGA device.\n",
xgi_video_info.chip_id);
return 0;
}
if (set == 0)
pci_read_config_dword(pdev, offset, (u32 *) value);
else
pci_write_config_dword(pdev, offset, (u32)(*value));
return 1;
}
/* ------------------ Internal helper routines ----------------- */
static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
{
int found_mode = 0;
int XGIfb_mode_idx = 0;
found_mode = 0;
while ((XGIbios_mode[XGIfb_mode_idx].mode_no != 0)
&& (XGIbios_mode[XGIfb_mode_idx].xres
<= XGI21_LCDCapList[0].LVDSHDE)) {
if ((XGIbios_mode[XGIfb_mode_idx].xres
== XGI21_LCDCapList[0].LVDSHDE)
&& (XGIbios_mode[XGIfb_mode_idx].yres
== XGI21_LCDCapList[0].LVDSVDE)
&& (XGIbios_mode[XGIfb_mode_idx].bpp == 8)) {
XGIfb_mode_no = XGIbios_mode[XGIfb_mode_idx].mode_no;
found_mode = 1;
break;
}
XGIfb_mode_idx++;
}
if (!found_mode)
XGIfb_mode_idx = 0;
return XGIfb_mode_idx;
}
static void XGIfb_search_mode(const char *name)
{
int i = 0, j = 0, l;
if (name == NULL) {
printk(KERN_ERR "XGIfb: Internal error, using default mode.\n");
xgifb_mode_idx = DEFAULT_MODE;
if ((xgi_video_info.chip == XG21)
&& ((xgi_video_info.disp_state & DISPTYPE_DISP2)
== DISPTYPE_LCD)) {
xgifb_mode_idx = XGIfb_GetXG21DefaultLVDSModeIdx();
}
return;
}
if (!strcmp(name, XGIbios_mode[MODE_INDEX_NONE].name)) {
printk(KERN_ERR "XGIfb: Mode 'none' not supported anymore. Using default.\n");
xgifb_mode_idx = DEFAULT_MODE;
if ((xgi_video_info.chip == XG21)
&& ((xgi_video_info.disp_state & DISPTYPE_DISP2)
== DISPTYPE_LCD)) {
xgifb_mode_idx = XGIfb_GetXG21DefaultLVDSModeIdx();
}
return;
}
while (XGIbios_mode[i].mode_no != 0) {
l = min(strlen(name), strlen(XGIbios_mode[i].name));
if (!strncmp(name, XGIbios_mode[i].name, l)) {
xgifb_mode_idx = i;
j = 1;
break;
}
i++;
}
if (!j)
printk(KERN_INFO "XGIfb: Invalid mode '%s'\n", name);
}
static void XGIfb_search_vesamode(unsigned int vesamode)
{
int i = 0, j = 0;
if (vesamode == 0) {
printk(KERN_ERR "XGIfb: Mode 'none' not supported anymore. Using default.\n");
xgifb_mode_idx = DEFAULT_MODE;
if ((xgi_video_info.chip == XG21)
&& ((xgi_video_info.disp_state & DISPTYPE_DISP2)
== DISPTYPE_LCD)) {
xgifb_mode_idx = XGIfb_GetXG21DefaultLVDSModeIdx();
}
return;
}
vesamode &= 0x1dff; /* Clean VESA mode number from other flags */
while (XGIbios_mode[i].mode_no != 0) {
if ((XGIbios_mode[i].vesa_mode_no_1 == vesamode) ||
(XGIbios_mode[i].vesa_mode_no_2 == vesamode)) {
xgifb_mode_idx = i;
j = 1;
break;
}
i++;
}
if (!j)
printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
}
static int XGIfb_GetXG21LVDSData(void)
{
u8 tmp;
unsigned char *pData;
int i, j, k;
tmp = xgifb_reg_get(XGISR, 0x1e);
xgifb_reg_set(XGISR, 0x1e, tmp | 4);
pData = xgi_video_info.mmio_vbase + 0x20000;
if ((pData[0x0] == 0x55) &&
(pData[0x1] == 0xAA) &&
(pData[0x65] & 0x1)) {
i = pData[0x316] | (pData[0x317] << 8);
j = pData[i - 1];
if (j == 0xff)
j = 1;
k = 0;
do {
XGI21_LCDCapList[k].LVDS_Capability = pData[i]
| (pData[i + 1] << 8);
XGI21_LCDCapList[k].LVDSHT = pData[i + 2] | (pData[i
+ 3] << 8);
XGI21_LCDCapList[k].LVDSVT = pData[i + 4] | (pData[i
+ 5] << 8);
XGI21_LCDCapList[k].LVDSHDE = pData[i + 6] | (pData[i
+ 7] << 8);
XGI21_LCDCapList[k].LVDSVDE = pData[i + 8] | (pData[i
+ 9] << 8);
XGI21_LCDCapList[k].LVDSHFP = pData[i + 10] | (pData[i
+ 11] << 8);
XGI21_LCDCapList[k].LVDSVFP = pData[i + 12] | (pData[i
+ 13] << 8);
XGI21_LCDCapList[k].LVDSHSYNC = pData[i + 14]
| (pData[i + 15] << 8);
XGI21_LCDCapList[k].LVDSVSYNC = pData[i + 16]
| (pData[i + 17] << 8);
XGI21_LCDCapList[k].VCLKData1 = pData[i + 18];
XGI21_LCDCapList[k].VCLKData2 = pData[i + 19];
XGI21_LCDCapList[k].PSC_S1 = pData[i + 20];
XGI21_LCDCapList[k].PSC_S2 = pData[i + 21];
XGI21_LCDCapList[k].PSC_S3 = pData[i + 22];
XGI21_LCDCapList[k].PSC_S4 = pData[i + 23];
XGI21_LCDCapList[k].PSC_S5 = pData[i + 24];
i += 25;
j--;
k++;
} while ((j > 0) && (k < (sizeof(XGI21_LCDCapList)
/ sizeof(struct XGI21_LVDSCapStruct))));
return 1;
}
return 0;
}
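/*
* Check whether BIOS mode table entry 'myindex' is usable with the
* detected chip and the active CRT2 device (LCD panel size, TV
* standard, secondary VGA); returns the index if valid, -1 otherwise.
*/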
static int XGIfb_validate_mode(int myindex)
{
u16 xres, yres;
if (xgi_video_info.chip == XG21) {
if ((xgi_video_info.disp_state & DISPTYPE_DISP2)
== DISPTYPE_LCD) {
xres = XGI21_LCDCapList[0].LVDSHDE;
yres = XGI21_LCDCapList[0].LVDSVDE;
if (XGIbios_mode[myindex].xres > xres)
return -1;
if (XGIbios_mode[myindex].yres > yres)
return -1;
if ((XGIbios_mode[myindex].xres < xres) &&
(XGIbios_mode[myindex].yres < yres)) {
if (XGIbios_mode[myindex].bpp > 8)
return -1;
}
}
return myindex;
}
/* FIXME: for now, all is valid on XG27 */
if (xgi_video_info.chip == XG27)
return myindex;
if (!(XGIbios_mode[myindex].chipset & MD_XGI315))
return -1;
switch (xgi_video_info.disp_state & DISPTYPE_DISP2) {
case DISPTYPE_LCD:
switch (XGIhw_ext.ulCRT2LCDType) {
case LCD_640x480:
xres = 640;
yres = 480;
break;
case LCD_800x600:
xres = 800;
yres = 600;
break;
case LCD_1024x600:
xres = 1024;
yres = 600;
break;
case LCD_1024x768:
xres = 1024;
yres = 768;
break;
case LCD_1152x768:
xres = 1152;
yres = 768;
break;
case LCD_1280x960:
xres = 1280;
yres = 960;
break;
case LCD_1280x768:
xres = 1280;
yres = 768;
break;
case LCD_1280x1024:
xres = 1280;
yres = 1024;
break;
case LCD_1400x1050:
xres = 1400;
yres = 1050;
break;
case LCD_1600x1200:
xres = 1600;
yres = 1200;
break;
/* case LCD_320x480: */ /* TW: FSTN */
/*
xres = 320;
yres = 480;
break;
*/
default:
xres = 0;
yres = 0;
break;
}
if (XGIbios_mode[myindex].xres > xres)
return -1;
if (XGIbios_mode[myindex].yres > yres)
return -1;
if ((XGIhw_ext.ulExternalChip == 0x01) || /* LVDS */
(XGIhw_ext.ulExternalChip == 0x05)) { /* LVDS+Chrontel */
switch (XGIbios_mode[myindex].xres) {
case 512:
if (XGIbios_mode[myindex].yres != 512)
return -1;
if (XGIhw_ext.ulCRT2LCDType == LCD_1024x600)
return -1;
break;
case 640:
if ((XGIbios_mode[myindex].yres != 400)
&& (XGIbios_mode[myindex].yres
!= 480))
return -1;
break;
case 800:
if (XGIbios_mode[myindex].yres != 600)
return -1;
break;
case 1024:
if ((XGIbios_mode[myindex].yres != 600) &&
(XGIbios_mode[myindex].yres != 768))
return -1;
if ((XGIbios_mode[myindex].yres == 600) &&
(XGIhw_ext.ulCRT2LCDType != LCD_1024x600))
return -1;
break;
case 1152:
if ((XGIbios_mode[myindex].yres) != 768)
return -1;
if (XGIhw_ext.ulCRT2LCDType != LCD_1152x768)
return -1;
break;
case 1280:
if ((XGIbios_mode[myindex].yres != 768) &&
(XGIbios_mode[myindex].yres != 1024))
return -1;
if ((XGIbios_mode[myindex].yres == 768) &&
(XGIhw_ext.ulCRT2LCDType != LCD_1280x768))
return -1;
break;
case 1400:
if (XGIbios_mode[myindex].yres != 1050)
return -1;
break;
case 1600:
if (XGIbios_mode[myindex].yres != 1200)
return -1;
break;
default:
return -1;
}
} else {
switch (XGIbios_mode[myindex].xres) {
case 512:
if (XGIbios_mode[myindex].yres != 512)
return -1;
break;
case 640:
if ((XGIbios_mode[myindex].yres != 400) &&
(XGIbios_mode[myindex].yres != 480))
return -1;
break;
case 800:
if (XGIbios_mode[myindex].yres != 600)
return -1;
break;
case 1024:
if (XGIbios_mode[myindex].yres != 768)
return -1;
break;
case 1280:
if ((XGIbios_mode[myindex].yres != 960) &&
(XGIbios_mode[myindex].yres != 1024))
return -1;
if (XGIbios_mode[myindex].yres == 960) {
if (XGIhw_ext.ulCRT2LCDType ==
LCD_1400x1050)
return -1;
}
break;
case 1400:
if (XGIbios_mode[myindex].yres != 1050)
return -1;
break;
case 1600:
if (XGIbios_mode[myindex].yres != 1200)
return -1;
break;
default:
return -1;
}
}
break;
case DISPTYPE_TV:
switch (XGIbios_mode[myindex].xres) {
case 512:
case 640:
case 800:
break;
case 720:
if (xgi_video_info.TV_type == TVMODE_NTSC) {
if (XGIbios_mode[myindex].yres != 480)
return -1;
} else if (xgi_video_info.TV_type == TVMODE_PAL) {
if (XGIbios_mode[myindex].yres != 576)
return -1;
}
/* TW: LVDS/CHRONTEL does not support 720 */
if (xgi_video_info.hasVB == HASVB_LVDS_CHRONTEL ||
xgi_video_info.hasVB == HASVB_CHRONTEL) {
return -1;
}
break;
case 1024:
if (xgi_video_info.TV_type == TVMODE_NTSC) {
if (XGIbios_mode[myindex].bpp == 32)
return -1;
}
break;
default:
return -1;
}
break;
case DISPTYPE_CRT2:
if (XGIbios_mode[myindex].xres > 1280)
return -1;
break;
}
return myindex;
}
static void XGIfb_search_crt2type(const char *name)
{
int i = 0;
if (name == NULL)
return;
while (XGI_crt2type[i].type_no != -1) {
if (!strcmp(name, XGI_crt2type[i].name)) {
XGIfb_crt2type = XGI_crt2type[i].type_no;
XGIfb_tvplug = XGI_crt2type[i].tvplug_no;
break;
}
i++;
}
if (XGIfb_crt2type < 0)
printk(KERN_INFO "XGIfb: Invalid CRT2 type: %s\n", name);
}
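/*
* Find the rate index for the requested refresh rate at the current
* resolution; rates up to 3Hz above or 2Hz below the request are
* accepted and snapped to the nearest table entry.
*/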
static u8 XGIfb_search_refresh_rate(unsigned int rate)
{
u16 xres, yres;
int i = 0;
xres = XGIbios_mode[xgifb_mode_idx].xres;
yres = XGIbios_mode[xgifb_mode_idx].yres;
XGIfb_rate_idx = 0;
while ((XGIfb_vrate[i].idx != 0) && (XGIfb_vrate[i].xres <= xres)) {
if ((XGIfb_vrate[i].xres == xres) &&
(XGIfb_vrate[i].yres == yres)) {
if (XGIfb_vrate[i].refresh == rate) {
XGIfb_rate_idx = XGIfb_vrate[i].idx;
break;
} else if (XGIfb_vrate[i].refresh > rate) {
if ((XGIfb_vrate[i].refresh - rate) <= 3) {
DPRINTK("XGIfb: Adjusting rate from %d up to %d\n",
rate, XGIfb_vrate[i].refresh);
XGIfb_rate_idx = XGIfb_vrate[i].idx;
xgi_video_info.refresh_rate =
XGIfb_vrate[i].refresh;
} else if (((rate - XGIfb_vrate[i - 1].refresh)
<= 2) && (XGIfb_vrate[i].idx
!= 1)) {
DPRINTK("XGIfb: Adjusting rate from %d down to %d\n",
rate, XGIfb_vrate[i-1].refresh);
XGIfb_rate_idx = XGIfb_vrate[i - 1].idx;
xgi_video_info.refresh_rate =
XGIfb_vrate[i - 1].refresh;
}
break;
} else if ((rate - XGIfb_vrate[i].refresh) <= 2) {
DPRINTK("XGIfb: Adjusting rate from %d down to %d\n",
rate, XGIfb_vrate[i].refresh);
XGIfb_rate_idx = XGIfb_vrate[i].idx;
break;
}
}
i++;
}
if (XGIfb_rate_idx > 0) {
return XGIfb_rate_idx;
} else {
printk(KERN_INFO "XGIfb: Unsupported rate %d for %dx%d\n",
rate, xres, yres);
return 0;
}
}
static void XGIfb_search_tvstd(const char *name)
{
int i = 0;
if (name == NULL)
return;
while (XGI_tvtype[i].type_no != -1) {
if (!strcmp(name, XGI_tvtype[i].name)) {
XGIfb_tvmode = XGI_tvtype[i].type_no;
break;
}
i++;
}
}
/* ----------- FBDev related routines for all series ----------- */
static void XGIfb_bpp_to_var(struct fb_var_screeninfo *var)
{
switch (var->bits_per_pixel) {
case 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
var->red.length = var->green.length = var->blue.length = 6;
xgi_video_info.video_cmap_len = 256;
break;
case 16:
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
xgi_video_info.video_cmap_len = 16;
break;
case 32:
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
xgi_video_info.video_cmap_len = 16;
break;
}
}
/* --------------------- SetMode routines ------------------------- */
static void XGIfb_pre_setmode(void)
{
u8 cr30 = 0, cr31 = 0;
cr31 = xgifb_reg_get(XGICR, 0x31);
cr31 &= ~0x60;
switch (xgi_video_info.disp_state & DISPTYPE_DISP2) {
case DISPTYPE_CRT2:
cr30 = (XGI_VB_OUTPUT_CRT2 | XGI_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= XGI_DRIVER_MODE;
break;
case DISPTYPE_LCD:
cr30 = (XGI_VB_OUTPUT_LCD | XGI_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= XGI_DRIVER_MODE;
break;
case DISPTYPE_TV:
if (xgi_video_info.TV_type == TVMODE_HIVISION)
cr30 = (XGI_VB_OUTPUT_HIVISION
| XGI_SIMULTANEOUS_VIEW_ENABLE);
else if (xgi_video_info.TV_plug == TVPLUG_SVIDEO)
cr30 = (XGI_VB_OUTPUT_SVIDEO
| XGI_SIMULTANEOUS_VIEW_ENABLE);
else if (xgi_video_info.TV_plug == TVPLUG_COMPOSITE)
cr30 = (XGI_VB_OUTPUT_COMPOSITE
| XGI_SIMULTANEOUS_VIEW_ENABLE);
else if (xgi_video_info.TV_plug == TVPLUG_SCART)
cr30 = (XGI_VB_OUTPUT_SCART
| XGI_SIMULTANEOUS_VIEW_ENABLE);
cr31 |= XGI_DRIVER_MODE;
if (XGIfb_tvmode == 1 || xgi_video_info.TV_type == TVMODE_PAL)
cr31 |= 0x01;
else
cr31 &= ~0x01;
break;
default: /* disable CRT2 */
cr30 = 0x00;
cr31 |= (XGI_DRIVER_MODE | XGI_VB_OUTPUT_DISABLE);
}
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31);
xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33, (XGIfb_rate_idx & 0x0F));
}
static void XGIfb_post_setmode(void)
{
u8 reg;
unsigned char doit = 1;
/*
xgifb_reg_set(XGISR,IND_XGI_PASSWORD,XGI_PASSWORD);
xgifb_reg_set(XGICR, 0x13, 0x00);
xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01);
*test*
*/
if (xgi_video_info.video_bpp == 8) {
/* TW: We can't switch off CRT1 on LVDS/Chrontel
* in 8bpp Modes */
if ((xgi_video_info.hasVB == HASVB_LVDS) ||
(xgi_video_info.hasVB == HASVB_LVDS_CHRONTEL)) {
doit = 0;
}
/* TW: We can't switch off CRT1 on 301B-DH
* in 8bpp Modes if using LCD */
if (xgi_video_info.disp_state & DISPTYPE_LCD)
doit = 0;
}
/* TW: We can't switch off CRT1 if bridge is in slave mode */
if (xgi_video_info.hasVB != HASVB_NONE) {
reg = xgifb_reg_get(XGIPART1, 0x00);
if ((reg & 0x50) == 0x10)
doit = 0;
} else {
XGIfb_crt1off = 0;
}
reg = xgifb_reg_get(XGICR, 0x17);
if ((XGIfb_crt1off) && (doit))
reg &= ~0x80;
else
reg |= 0x80;
xgifb_reg_set(XGICR, 0x17, reg);
xgifb_reg_and(XGISR, IND_XGI_RAMDAC_CONTROL, ~0x04);
if ((xgi_video_info.disp_state & DISPTYPE_TV) && (xgi_video_info.hasVB
== HASVB_301)) {
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg < 0xB0) { /* Set filter for XGI301 */
switch (xgi_video_info.video_width) {
case 320:
filter_tb = (xgi_video_info.TV_type ==
TVMODE_NTSC) ? 4 : 12;
break;
case 640:
filter_tb = (xgi_video_info.TV_type ==
TVMODE_NTSC) ? 5 : 13;
break;
case 720:
filter_tb = (xgi_video_info.TV_type ==
TVMODE_NTSC) ? 6 : 14;
break;
case 800:
filter_tb = (xgi_video_info.TV_type ==
TVMODE_NTSC) ? 7 : 15;
break;
default:
filter = -1;
break;
}
xgifb_reg_or(XGIPART1, XGIfb_CRT2_write_enable, 0x01);
if (xgi_video_info.TV_type == TVMODE_NTSC) {
xgifb_reg_and(XGIPART2, 0x3a, 0x1f);
if (xgi_video_info.TV_plug == TVPLUG_SVIDEO) {
xgifb_reg_and(XGIPART2, 0x30, 0xdf);
} else if (xgi_video_info.TV_plug
== TVPLUG_COMPOSITE) {
xgifb_reg_or(XGIPART2, 0x30, 0x20);
switch (xgi_video_info.video_width) {
case 640:
xgifb_reg_set(XGIPART2,
0x35,
0xEB);
xgifb_reg_set(XGIPART2,
0x36,
0x04);
xgifb_reg_set(XGIPART2,
0x37,
0x25);
xgifb_reg_set(XGIPART2,
0x38,
0x18);
break;
case 720:
xgifb_reg_set(XGIPART2,
0x35,
0xEE);
xgifb_reg_set(XGIPART2,
0x36,
0x0C);
xgifb_reg_set(XGIPART2,
0x37,
0x22);
xgifb_reg_set(XGIPART2,
0x38,
0x08);
break;
case 800:
xgifb_reg_set(XGIPART2,
0x35,
0xEB);
xgifb_reg_set(XGIPART2,
0x36,
0x15);
xgifb_reg_set(XGIPART2,
0x37,
0x25);
xgifb_reg_set(XGIPART2,
0x38,
0xF6);
break;
}
}
} else if (xgi_video_info.TV_type == TVMODE_PAL) {
xgifb_reg_and(XGIPART2, 0x3A, 0x1F);
if (xgi_video_info.TV_plug == TVPLUG_SVIDEO) {
xgifb_reg_and(XGIPART2, 0x30, 0xDF);
} else if (xgi_video_info.TV_plug
== TVPLUG_COMPOSITE) {
xgifb_reg_or(XGIPART2, 0x30, 0x20);
switch (xgi_video_info.video_width) {
case 640:
xgifb_reg_set(XGIPART2,
0x35,
0xF1);
xgifb_reg_set(XGIPART2,
0x36,
0xF7);
xgifb_reg_set(XGIPART2,
0x37,
0x1F);
xgifb_reg_set(XGIPART2,
0x38,
0x32);
break;
case 720:
xgifb_reg_set(XGIPART2,
0x35,
0xF3);
xgifb_reg_set(XGIPART2,
0x36,
0x00);
xgifb_reg_set(XGIPART2,
0x37,
0x1D);
xgifb_reg_set(XGIPART2,
0x38,
0x20);
break;
case 800:
xgifb_reg_set(XGIPART2,
0x35,
0xFC);
xgifb_reg_set(XGIPART2,
0x36,
0xFB);
xgifb_reg_set(XGIPART2,
0x37,
0x14);
xgifb_reg_set(XGIPART2,
0x38,
0x2A);
break;
}
}
}
if ((filter >= 0) && (filter <= 7)) {
DPRINTK("FilterTable[%d]-%d: %02x %02x %02x %02x\n",
filter_tb, filter,
XGI_TV_filter[filter_tb].
filter[filter][0],
XGI_TV_filter[filter_tb].
filter[filter][1],
XGI_TV_filter[filter_tb].
filter[filter][2],
XGI_TV_filter[filter_tb].
filter[filter][3]
);
xgifb_reg_set(
XGIPART2,
0x35,
(XGI_TV_filter[filter_tb].
filter[filter][0]));
xgifb_reg_set(
XGIPART2,
0x36,
(XGI_TV_filter[filter_tb].
filter[filter][1]));
xgifb_reg_set(
XGIPART2,
0x37,
(XGI_TV_filter[filter_tb].
filter[filter][2]));
xgifb_reg_set(
XGIPART2,
0x38,
(XGI_TV_filter[filter_tb].
filter[filter][3]));
}
}
}
}
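/*
* Core mode-set helper: derive the refresh rate from the pixel clock,
* find the matching XGIbios_mode[] entry, program the hardware through
* XGISetModeNew() and refresh the bookkeeping in xgi_video_info.
*/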
static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
struct fb_info *info)
{
unsigned int htotal = var->left_margin + var->xres + var->right_margin
+ var->hsync_len;
unsigned int vtotal = var->upper_margin + var->yres + var->lower_margin
+ var->vsync_len;
#if defined(__powerpc__)
u8 sr_data, cr_data;
#endif
unsigned int drate = 0, hrate = 0;
int found_mode = 0;
int old_mode;
/* unsigned char reg, reg1; */
DEBUGPRN("Inside do_set_var");
/* printk(KERN_DEBUG "XGIfb:var->yres=%d, var->upper_margin=%d, var->lower_margin=%d, var->vsync_len=%d\n", var->yres, var->upper_margin, var->lower_margin, var->vsync_len); */
info->var.xres_virtual = var->xres_virtual;
info->var.yres_virtual = var->yres_virtual;
info->var.bits_per_pixel = var->bits_per_pixel;
if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED)
vtotal <<= 1;
else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE)
vtotal <<= 2;
else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
/* vtotal <<= 1; */
/* var->yres <<= 1; */
}
if (!htotal || !vtotal) {
DPRINTK("XGIfb: Invalid 'var' information\n");
return -EINVAL;
}
printk(KERN_DEBUG "XGIfb: var->pixclock=%d, htotal=%d, vtotal=%d\n",
var->pixclock, htotal, vtotal);
if (var->pixclock && htotal && vtotal) {
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
xgi_video_info.refresh_rate = (unsigned int) (hrate * 2
/ vtotal);
} else {
xgi_video_info.refresh_rate = 60;
}
printk(KERN_DEBUG "XGIfb: Change mode to %dx%dx%d-%dHz\n",
var->xres,
var->yres,
var->bits_per_pixel,
xgi_video_info.refresh_rate);
old_mode = xgifb_mode_idx;
xgifb_mode_idx = 0;
while ((XGIbios_mode[xgifb_mode_idx].mode_no != 0)
&& (XGIbios_mode[xgifb_mode_idx].xres <= var->xres)) {
if ((XGIbios_mode[xgifb_mode_idx].xres == var->xres)
&& (XGIbios_mode[xgifb_mode_idx].yres
== var->yres)
&& (XGIbios_mode[xgifb_mode_idx].bpp
== var->bits_per_pixel)) {
XGIfb_mode_no = XGIbios_mode[xgifb_mode_idx].mode_no;
found_mode = 1;
break;
}
xgifb_mode_idx++;
}
if (found_mode)
xgifb_mode_idx = XGIfb_validate_mode(xgifb_mode_idx);
else
xgifb_mode_idx = -1;
if (xgifb_mode_idx < 0) {
printk(KERN_ERR "XGIfb: Mode %dx%dx%d not supported\n",
var->xres, var->yres, var->bits_per_pixel);
xgifb_mode_idx = old_mode;
return -EINVAL;
}
if (XGIfb_search_refresh_rate(xgi_video_info.refresh_rate) == 0) {
XGIfb_rate_idx = XGIbios_mode[xgifb_mode_idx].rate_idx;
xgi_video_info.refresh_rate = 60;
}
if (isactive) {
XGIfb_pre_setmode();
if (XGISetModeNew(&XGIhw_ext, XGIfb_mode_no) == 0) {
printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
XGIfb_mode_no);
return -EINVAL;
}
info->fix.line_length = ((info->var.xres_virtual
* info->var.bits_per_pixel) >> 6);
xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff));
xgifb_reg_set(XGISR,
0x0E,
(info->fix.line_length & 0xff00) >> 8);
XGIfb_post_setmode();
DPRINTK("XGIfb: Set new mode: %dx%dx%d-%d\n",
XGIbios_mode[xgifb_mode_idx].xres,
XGIbios_mode[xgifb_mode_idx].yres,
XGIbios_mode[xgifb_mode_idx].bpp,
xgi_video_info.refresh_rate);
xgi_video_info.video_bpp = XGIbios_mode[xgifb_mode_idx].bpp;
xgi_video_info.video_vwidth = info->var.xres_virtual;
xgi_video_info.video_width = XGIbios_mode[xgifb_mode_idx].xres;
xgi_video_info.video_vheight = info->var.yres_virtual;
xgi_video_info.video_height = XGIbios_mode[xgifb_mode_idx].yres;
xgi_video_info.org_x = xgi_video_info.org_y = 0;
xgi_video_info.video_linelength = info->var.xres_virtual
* (xgi_video_info.video_bpp >> 3);
switch (xgi_video_info.video_bpp) {
case 8:
xgi_video_info.DstColor = 0x0000;
xgi_video_info.XGI310_AccelDepth = 0x00000000;
xgi_video_info.video_cmap_len = 256;
#if defined(__powerpc__)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, (cr_data & 0xE0));
#endif
break;
case 16:
xgi_video_info.DstColor = 0x8000;
xgi_video_info.XGI310_AccelDepth = 0x00010000;
#if defined(__powerpc__)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x0B));
#endif
xgi_video_info.video_cmap_len = 16;
break;
case 32:
xgi_video_info.DstColor = 0xC000;
xgi_video_info.XGI310_AccelDepth = 0x00020000;
xgi_video_info.video_cmap_len = 16;
#if defined(__powerpc__)
cr_data = xgifb_reg_get(XGICR, 0x4D);
xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x15));
#endif
break;
default:
xgi_video_info.video_cmap_len = 16;
printk(KERN_ERR "XGIfb: Unsupported depth %d",
xgi_video_info.video_bpp);
break;
}
}
XGIfb_bpp_to_var(var); /*update ARGB info*/
DEBUGPRN("End of do_set_var");
dumpVGAReg();
return 0;
}
#ifdef XGIFB_PAN
static int XGIfb_pan_var(struct fb_var_screeninfo *var)
{
unsigned int base;
/* printk("Inside pan_var"); */
if (var->xoffset > (var->xres_virtual - var->xres)) {
/* printk("Pan: xo: %d xv %d xr %d\n",
var->xoffset, var->xres_virtual, var->xres); */
return -EINVAL;
}
if (var->yoffset > (var->yres_virtual - var->yres)) {
/* printk("Pan: yo: %d yv %d yr %d\n",
var->yoffset, var->yres_virtual, var->yres); */
return -EINVAL;
}
base = var->yoffset * var->xres_virtual + var->xoffset;
/* calculate base bpp dep. */
switch (var->bits_per_pixel) {
case 16:
base >>= 1;
break;
case 32:
break;
case 8:
default:
base >>= 2;
break;
}
xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
xgifb_reg_set(XGICR, 0x0D, base & 0xFF);
xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF);
xgifb_reg_set(XGISR, 0x0D, (base >> 16) & 0xFF);
xgifb_reg_set(XGISR, 0x37, (base >> 24) & 0x03);
xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04);
if (xgi_video_info.disp_state & DISPTYPE_DISP2) {
xgifb_reg_or(XGIPART1, XGIfb_CRT2_write_enable, 0x01);
xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF));
xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF));
xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF));
xgifb_reg_and_or(XGIPART1,
0x02,
0x7F,
((base >> 24) & 0x01) << 7);
}
/* printk("End of pan_var"); */
return 0;
}
#endif
static int XGIfb_open(struct fb_info *info, int user)
{
return 0;
}
static int XGIfb_release(struct fb_info *info, int user)
{
return 0;
}
static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
{
int rc = 16;
switch (var->bits_per_pixel) {
case 8:
rc = 256;
break;
case 16:
rc = 16;
break;
case 32:
rc = 16;
break;
}
return rc;
}
static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *info)
{
if (regno >= XGIfb_get_cmap_len(&info->var))
return 1;
switch (info->var.bits_per_pixel) {
case 8:
outb(regno, XGIDACA);
outb((red >> 10), XGIDACD);
outb((green >> 10), XGIDACD);
outb((blue >> 10), XGIDACD);
if (xgi_video_info.disp_state & DISPTYPE_DISP2) {
outb(regno, XGIDAC2A);
outb((red >> 8), XGIDAC2D);
outb((green >> 8), XGIDAC2D);
outb((blue >> 8), XGIDAC2D);
}
break;
case 16:
((u32 *) (info->pseudo_palette))[regno] = ((red & 0xf800))
| ((green & 0xfc00) >> 5) | ((blue & 0xf800)
>> 11);
break;
case 32:
red >>= 8;
green >>= 8;
blue >>= 8;
((u32 *) (info->pseudo_palette))[regno] = (red << 16) | (green
<< 8) | (blue);
break;
}
return 0;
}
/* ----------- FBDev related routines for all series ---------- */
static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
struct fb_info *info)
{
DEBUGPRN("inside get_fix");
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strcpy(fix->id, myid);
fix->smem_start = xgi_video_info.video_base;
fix->smem_len = xgi_video_info.video_size;
fix->type = video_type;
fix->type_aux = 0;
if (xgi_video_info.video_bpp == 8)
fix->visual = FB_VISUAL_PSEUDOCOLOR;
else
fix->visual = FB_VISUAL_DIRECTCOLOR;
fix->xpanstep = 0;
#ifdef XGIFB_PAN
if (XGIfb_ypan)
fix->ypanstep = 1;
#endif
fix->ywrapstep = 0;
fix->line_length = xgi_video_info.video_linelength;
fix->mmio_start = xgi_video_info.mmio_base;
fix->mmio_len = xgi_video_info.mmio_size;
fix->accel = FB_ACCEL_XGI_XABRE;
DEBUGPRN("end of get_fix");
return 0;
}
static int XGIfb_set_par(struct fb_info *info)
{
int err;
/* printk("XGIfb: inside set_par\n"); */
err = XGIfb_do_set_var(&info->var, 1, info);
if (err)
return err;
XGIfb_get_fix(&info->fix, -1, info);
/* printk("XGIfb: end of set_par\n"); */
return 0;
}
static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
unsigned int htotal = var->left_margin + var->xres + var->right_margin
+ var->hsync_len;
unsigned int vtotal = 0;
unsigned int drate = 0, hrate = 0;
int found_mode = 0;
int refresh_rate, search_idx;
DEBUGPRN("Inside check_var");
if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
vtotal = var->upper_margin + var->yres + var->lower_margin
+ var->vsync_len;
vtotal <<= 1;
} else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
vtotal = var->upper_margin + var->yres + var->lower_margin
+ var->vsync_len;
vtotal <<= 2;
} else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
vtotal = var->upper_margin + (var->yres / 2)
+ var->lower_margin + var->vsync_len;
} else
vtotal = var->upper_margin + var->yres + var->lower_margin
+ var->vsync_len;
if (!(htotal) || !(vtotal))
XGIFAIL("XGIfb: no valid timing data");
if (var->pixclock && htotal && vtotal) {
drate = 1000000000 / var->pixclock;
hrate = (drate * 1000) / htotal;
xgi_video_info.refresh_rate =
(unsigned int) (hrate * 2 / vtotal);
printk(KERN_DEBUG
"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
__func__, var->pixclock, htotal, vtotal,
__func__, drate, hrate, xgi_video_info.refresh_rate);
} else {
xgi_video_info.refresh_rate = 60;
}
/*
if ((var->pixclock) && (htotal)) {
drate = 1E12 / var->pixclock;
hrate = drate / htotal;
refresh_rate = (unsigned int) (hrate / vtotal * 2 + 0.5);
} else {
refresh_rate = 60;
}
*/
/* TW: Calculation wrong for 1024x600 - force it to 60Hz */
if ((var->xres == 1024) && (var->yres == 600))
refresh_rate = 60;
search_idx = 0;
while ((XGIbios_mode[search_idx].mode_no != 0) &&
(XGIbios_mode[search_idx].xres <= var->xres)) {
if ((XGIbios_mode[search_idx].xres == var->xres) &&
(XGIbios_mode[search_idx].yres == var->yres) &&
(XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
if (XGIfb_validate_mode(search_idx) > 0) {
found_mode = 1;
break;
}
}
search_idx++;
}
if (!found_mode) {
printk(KERN_ERR "XGIfb: %dx%dx%d is no valid mode\n",
var->xres, var->yres, var->bits_per_pixel);
search_idx = 0;
while (XGIbios_mode[search_idx].mode_no != 0) {
if ((var->xres <= XGIbios_mode[search_idx].xres) &&
(var->yres <= XGIbios_mode[search_idx].yres) &&
(var->bits_per_pixel ==
XGIbios_mode[search_idx].bpp)) {
if (XGIfb_validate_mode(search_idx) > 0) {
found_mode = 1;
break;
}
}
search_idx++;
}
if (found_mode) {
var->xres = XGIbios_mode[search_idx].xres;
var->yres = XGIbios_mode[search_idx].yres;
printk(KERN_DEBUG "XGIfb: Adapted to mode %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
} else {
printk(KERN_ERR "XGIfb: Failed to find similar mode to %dx%dx%d\n",
var->xres, var->yres, var->bits_per_pixel);
return -EINVAL;
}
}
/* TW: TODO: Check the refresh rate */
/* Adapt RGB settings */
XGIfb_bpp_to_var(var);
/* Sanity check for offsets */
if (var->xoffset < 0)
var->xoffset = 0;
if (var->yoffset < 0)
var->yoffset = 0;
if (!XGIfb_ypan) {
if (var->xres != var->xres_virtual)
var->xres_virtual = var->xres;
if (var->yres != var->yres_virtual)
var->yres_virtual = var->yres;
} /* else { */
/* TW: Now patch yres_virtual if we use panning */
/* May I do this? */
/* var->yres_virtual = xgi_video_info.heapstart /
(var->xres * (var->bits_per_pixel >> 3)); */
/* if (var->yres_virtual <= var->yres) { */
/* TW: Paranoia check */
/* var->yres_virtual = var->yres; */
/* } */
/* } */
/* Truncate offsets to maximum if too high */
if (var->xoffset > var->xres_virtual - var->xres)
var->xoffset = var->xres_virtual - var->xres - 1;
if (var->yoffset > var->yres_virtual - var->yres)
var->yoffset = var->yres_virtual - var->yres - 1;
/* Set everything else to 0 */
var->red.msb_right =
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
DEBUGPRN("end of check_var");
return 0;
}
#ifdef XGIFB_PAN
static int XGIfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int err;
/* printk("\nInside pan_display:\n"); */
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
return -EINVAL;
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset < 0 || var->yoffset >= info->var.yres_virtual
|| var->xoffset)
return -EINVAL;
} else {
if (var->xoffset + info->var.xres > info->var.xres_virtual
|| var->yoffset + info->var.yres
> info->var.yres_virtual)
return -EINVAL;
}
err = XGIfb_pan_var(var);
if (err < 0)
return err;
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
if (var->vmode & FB_VMODE_YWRAP)
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
/* printk("End of pan_display\n"); */
return 0;
}
#endif
static int XGIfb_blank(int blank, struct fb_info *info)
{
u8 reg;
reg = xgifb_reg_get(XGICR, 0x17);
if (blank > 0)
reg &= 0x7f;
else
reg |= 0x80;
xgifb_reg_set(XGICR, 0x17, reg);
xgifb_reg_set(XGISR, 0x00, 0x01); /* Synchronous Reset */
xgifb_reg_set(XGISR, 0x00, 0x03); /* End Reset */
return 0;
}
static struct fb_ops XGIfb_ops = {
.owner = THIS_MODULE,
.fb_open = XGIfb_open,
.fb_release = XGIfb_release,
.fb_check_var = XGIfb_check_var,
.fb_set_par = XGIfb_set_par,
.fb_setcolreg = XGIfb_setcolreg,
#ifdef XGIFB_PAN
.fb_pan_display = XGIfb_pan_display,
#endif
.fb_blank = XGIfb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
/* .fb_mmap = XGIfb_mmap, */
};
/* ---------------- Chip generation dependent routines ---------------- */
/* for XGI 315/550/650/740/330 */
static int XGIfb_get_dram_size(void)
{
u8 ChannelNum, tmp;
u8 reg = 0;
/* xorg driver sets 32MB * 1 channel */
if (xgi_video_info.chip == XG27)
xgifb_reg_set(XGISR, IND_XGI_DRAM_SIZE, 0x51);
reg = xgifb_reg_get(XGISR, IND_XGI_DRAM_SIZE);
switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) {
case XGI_DRAM_SIZE_1MB:
xgi_video_info.video_size = 0x100000;
break;
case XGI_DRAM_SIZE_2MB:
xgi_video_info.video_size = 0x200000;
break;
case XGI_DRAM_SIZE_4MB:
xgi_video_info.video_size = 0x400000;
break;
case XGI_DRAM_SIZE_8MB:
xgi_video_info.video_size = 0x800000;
break;
case XGI_DRAM_SIZE_16MB:
xgi_video_info.video_size = 0x1000000;
break;
case XGI_DRAM_SIZE_32MB:
xgi_video_info.video_size = 0x2000000;
break;
case XGI_DRAM_SIZE_64MB:
xgi_video_info.video_size = 0x4000000;
break;
case XGI_DRAM_SIZE_128MB:
xgi_video_info.video_size = 0x8000000;
break;
case XGI_DRAM_SIZE_256MB:
xgi_video_info.video_size = 0x10000000;
break;
default:
return -1;
}
tmp = (reg & 0x0c) >> 2;
switch (xgi_video_info.chip) {
case XG20:
case XG21:
case XG27:
ChannelNum = 1;
break;
case XG42:
if (reg & 0x04)
ChannelNum = 2;
else
ChannelNum = 1;
break;
case XG45:
if (tmp == 1)
ChannelNum = 2;
else if (tmp == 2)
ChannelNum = 3;
else if (tmp == 3)
ChannelNum = 4;
else
ChannelNum = 1;
break;
case XG40:
default:
if (tmp == 2)
ChannelNum = 2;
else if (tmp == 3)
ChannelNum = 3;
else
ChannelNum = 1;
break;
}
xgi_video_info.video_size = xgi_video_info.video_size * ChannelNum;
/* PLiad fixed for benchmarking and fb set */
/* xgi_video_info.video_size = 0x200000; */ /* 1024x768x16 */
/* xgi_video_info.video_size = 0x1000000; */ /* benchmark */
printk("XGIfb: SR14=%x DramSzie %x ChannelNum %x\n",
reg,
xgi_video_info.video_size, ChannelNum);
return 0;
}
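/*
* Read the CR32 scratch register to work out which CRT2 display
* (TV, LCD or secondary VGA) is connected and, for TV, which connector
* and standard to use; module options override the detected values.
*/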
static void XGIfb_detect_VB(void)
{
u8 cr32, temp = 0;
xgi_video_info.TV_plug = xgi_video_info.TV_type = 0;
switch (xgi_video_info.hasVB) {
case HASVB_LVDS_CHRONTEL:
case HASVB_CHRONTEL:
break;
case HASVB_301:
case HASVB_302:
/* XGI_Sense30x(); */ /* Yi-Lin TV Sense? */
break;
}
cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32);
if ((cr32 & XGI_CRT1) && !XGIfb_crt1off)
XGIfb_crt1off = 0;
else {
if (cr32 & 0x5F)
XGIfb_crt1off = 1;
else
XGIfb_crt1off = 0;
}
if (XGIfb_crt2type != -1)
/* TW: Override with option */
xgi_video_info.disp_state = XGIfb_crt2type;
else if (cr32 & XGI_VB_TV)
xgi_video_info.disp_state = DISPTYPE_TV;
else if (cr32 & XGI_VB_LCD)
xgi_video_info.disp_state = DISPTYPE_LCD;
else if (cr32 & XGI_VB_CRT2)
xgi_video_info.disp_state = DISPTYPE_CRT2;
else
xgi_video_info.disp_state = 0;
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
xgi_video_info.TV_plug = XGIfb_tvplug;
else if (cr32 & XGI_VB_HIVISION) {
xgi_video_info.TV_type = TVMODE_HIVISION;
xgi_video_info.TV_plug = TVPLUG_SVIDEO;
} else if (cr32 & XGI_VB_SVIDEO)
xgi_video_info.TV_plug = TVPLUG_SVIDEO;
else if (cr32 & XGI_VB_COMPOSITE)
xgi_video_info.TV_plug = TVPLUG_COMPOSITE;
else if (cr32 & XGI_VB_SCART)
xgi_video_info.TV_plug = TVPLUG_SCART;
if (xgi_video_info.TV_type == 0) {
temp = xgifb_reg_get(XGICR, 0x38);
if (temp & 0x10)
xgi_video_info.TV_type = TVMODE_PAL;
else
xgi_video_info.TV_type = TVMODE_NTSC;
}
/* TW: Copy forceCRT1 option to CRT1off if option is given */
if (XGIfb_forcecrt1 != -1) {
if (XGIfb_forcecrt1)
XGIfb_crt1off = 0;
else
XGIfb_crt1off = 1;
}
}
static int XGIfb_has_VB(void)
{
u8 vb_chipid;
vb_chipid = xgifb_reg_get(XGIPART4, 0x00);
switch (vb_chipid) {
case 0x01:
xgi_video_info.hasVB = HASVB_301;
break;
case 0x02:
xgi_video_info.hasVB = HASVB_302;
break;
default:
xgi_video_info.hasVB = HASVB_NONE;
return 0;
}
return 1;
}
static void XGIfb_get_VB_type(void)
{
u8 reg;
if (!XGIfb_has_VB()) {
reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37);
switch ((reg & XGI_EXTERNAL_CHIP_MASK) >> 1) {
case XGI310_EXTERNAL_CHIP_LVDS:
xgi_video_info.hasVB = HASVB_LVDS;
break;
case XGI310_EXTERNAL_CHIP_LVDS_CHRONTEL:
xgi_video_info.hasVB = HASVB_LVDS_CHRONTEL;
break;
default:
break;
}
}
}
XGIINITSTATIC int __init XGIfb_setup(char *options)
{
char *this_opt;
xgi_video_info.refresh_rate = 0;
printk(KERN_INFO "XGIfb: Options %s\n", options);
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt)
continue;
if (!strncmp(this_opt, "mode:", 5)) {
XGIfb_search_mode(this_opt + 5);
} else if (!strncmp(this_opt, "vesa:", 5)) {
XGIfb_search_vesamode(simple_strtoul(
this_opt + 5, NULL, 0));
} else if (!strncmp(this_opt, "mode:", 5)) {
XGIfb_search_mode(this_opt + 5);
} else if (!strncmp(this_opt, "vesa:", 5)) {
XGIfb_search_vesamode(simple_strtoul(
this_opt + 5, NULL, 0));
} else if (!strncmp(this_opt, "vrate:", 6)) {
xgi_video_info.refresh_rate = simple_strtoul(
this_opt + 6, NULL, 0);
} else if (!strncmp(this_opt, "rate:", 5)) {
xgi_video_info.refresh_rate = simple_strtoul(
this_opt + 5, NULL, 0);
} else if (!strncmp(this_opt, "off", 3)) {
XGIfb_off = 1;
} else if (!strncmp(this_opt, "crt1off", 7)) {
XGIfb_crt1off = 1;
} else if (!strncmp(this_opt, "filter:", 7)) {
filter = (int)simple_strtoul(this_opt + 7, NULL, 0);
} else if (!strncmp(this_opt, "forcecrt2type:", 14)) {
XGIfb_search_crt2type(this_opt + 14);
} else if (!strncmp(this_opt, "forcecrt1:", 10)) {
XGIfb_forcecrt1 = (int)simple_strtoul(
this_opt + 10, NULL, 0);
} else if (!strncmp(this_opt, "tvmode:", 7)) {
XGIfb_search_tvstd(this_opt + 7);
} else if (!strncmp(this_opt, "tvstandard:", 11)) {
XGIfb_search_tvstd(this_opt + 11);
} else if (!strncmp(this_opt, "dstn", 4)) {
enable_dstn = 1;
/* TW: DSTN overrules forcecrt2type */
XGIfb_crt2type = DISPTYPE_LCD;
} else if (!strncmp(this_opt, "pdc:", 4)) {
XGIfb_pdc = simple_strtoul(this_opt + 4, NULL, 0);
if (XGIfb_pdc & ~0x3c) {
printk(KERN_INFO "XGIfb: Illegal pdc parameter\n");
XGIfb_pdc = 0;
}
} else if (!strncmp(this_opt, "noypan", 6)) {
XGIfb_ypan = 0;
} else if (!strncmp(this_opt, "userom:", 7)) {
XGIfb_userom = (int)simple_strtoul(
this_opt + 7, NULL, 0);
/* } else if (!strncmp(this_opt, "useoem:", 7)) { */
/* XGIfb_useoem = (int)simple_strtoul(
this_opt + 7, NULL, 0); */
} else {
XGIfb_search_mode(this_opt);
/* printk(KERN_INFO "XGIfb: Invalid option %s\n",
this_opt); */
}
/* TW: Panning only with acceleration */
XGIfb_ypan = 0;
}
printk("\nxgifb: outa xgifb_setup 3450");
return 0;
}
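/*
* Map the PCI option ROM and copy up to XGIFB_ROM_SIZE (64KB) of it
* into a vzalloc'd buffer so the mode-setting code can keep using it
* after the ROM mapping is released.
*/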
static unsigned char *xgifb_copy_rom(struct pci_dev *dev)
{
void __iomem *rom_address;
unsigned char *rom_copy;
size_t rom_size;
rom_address = pci_map_rom(dev, &rom_size);
if (rom_address == NULL)
return NULL;
rom_copy = vzalloc(XGIFB_ROM_SIZE);
if (rom_copy == NULL)
goto done;
rom_size = min_t(size_t, rom_size, XGIFB_ROM_SIZE);
memcpy_fromio(rom_copy, rom_address, rom_size);
done:
pci_unmap_rom(dev, rom_address);
return rom_copy;
}
static int __devinit xgifb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u8 reg, reg1;
u8 CR48, CR38;
int ret;
if (XGIfb_off)
return -ENXIO;
XGIfb_registered = 0;
memset(&XGIhw_ext, 0, sizeof(struct xgi_hw_device_info));
fb_info = framebuffer_alloc(sizeof(struct fb_info), &pdev->dev);
if (!fb_info)
return -ENOMEM;
xgi_video_info.chip_id = pdev->device;
pci_read_config_byte(pdev,
PCI_REVISION_ID,
&xgi_video_info.revision_id);
XGIhw_ext.jChipRevision = xgi_video_info.revision_id;
xgi_video_info.pcibus = pdev->bus->number;
xgi_video_info.pcislot = PCI_SLOT(pdev->devfn);
xgi_video_info.pcifunc = PCI_FUNC(pdev->devfn);
xgi_video_info.subsysvendor = pdev->subsystem_vendor;
xgi_video_info.subsysdevice = pdev->subsystem_device;
xgi_video_info.video_base = pci_resource_start(pdev, 0);
xgi_video_info.mmio_base = pci_resource_start(pdev, 1);
xgi_video_info.mmio_size = pci_resource_len(pdev, 1);
xgi_video_info.vga_base = pci_resource_start(pdev, 2) + 0x30;
XGIhw_ext.pjIOAddress = (unsigned char *)xgi_video_info.vga_base;
/* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */
printk("XGIfb: Relocate IO address: %lx [%08lx]\n",
(unsigned long)pci_resource_start(pdev, 2), XGI_Pr.RelIO);
if (pci_enable_device(pdev)) {
ret = -EIO;
goto error;
}
XGIRegInit(&XGI_Pr, (unsigned long)XGIhw_ext.pjIOAddress);
xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
reg1 = xgifb_reg_get(XGISR, IND_XGI_PASSWORD);
if (reg1 != 0xa1) { /*I/O error */
printk("\nXGIfb: I/O error!!!");
ret = -EIO;
goto error;
}
switch (xgi_video_info.chip_id) {
case PCI_DEVICE_ID_XG_20:
xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
if (CR48&GPIOG_READ)
xgi_video_info.chip = XG21;
else
xgi_video_info.chip = XG20;
XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
break;
case PCI_DEVICE_ID_XG_40:
xgi_video_info.chip = XG40;
XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
break;
case PCI_DEVICE_ID_XG_41:
xgi_video_info.chip = XG41;
XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
break;
case PCI_DEVICE_ID_XG_42:
xgi_video_info.chip = XG42;
XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
break;
case PCI_DEVICE_ID_XG_27:
xgi_video_info.chip = XG27;
XGIfb_CRT2_write_enable = IND_XGI_CRT2_WRITE_ENABLE_315;
break;
default:
ret = -ENODEV;
goto error;
}
printk("XGIfb:chipid = %x\n", xgi_video_info.chip);
XGIhw_ext.jChipType = xgi_video_info.chip;
if ((xgi_video_info.chip == XG21) || (XGIfb_userom)) {
XGIhw_ext.pjVirtualRomBase = xgifb_copy_rom(pdev);
if (XGIhw_ext.pjVirtualRomBase)
printk(KERN_INFO "XGIfb: Video ROM found and mapped to %p\n",
XGIhw_ext.pjVirtualRomBase);
else
printk(KERN_INFO "XGIfb: Video ROM not found\n");
} else {
XGIhw_ext.pjVirtualRomBase = NULL;
printk(KERN_INFO "XGIfb: Video ROM usage disabled\n");
}
XGIhw_ext.pQueryVGAConfigSpace = &XGIfb_query_VGA_config_space;
if (XGIfb_get_dram_size()) {
printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
goto error;
}
if ((xgifb_mode_idx < 0) ||
((XGIbios_mode[xgifb_mode_idx].mode_no) != 0xFF)) {
/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
xgifb_reg_or(XGISR,
IND_XGI_PCI_ADDRESS_SET,
(XGI_PCI_ADDR_ENABLE | XGI_MEM_MAP_IO_ENABLE));
/* Enable 2D accelerator engine */
xgifb_reg_or(XGISR, IND_XGI_MODULE_ENABLE, XGI_ENABLE_2D);
}
XGIhw_ext.ulVideoMemorySize = xgi_video_info.video_size;
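/* Reserve the framebuffer aperture and the MMIO window, then map both. */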
if (!request_mem_region(xgi_video_info.video_base,
xgi_video_info.video_size,
"XGIfb FB")) {
printk("unable request memory size %x",
xgi_video_info.video_size);
printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve frame buffer memory\n");
printk(KERN_ERR "XGIfb: Is there another framebuffer driver active?\n");
ret = -ENODEV;
goto error;
}
if (!request_mem_region(xgi_video_info.mmio_base,
xgi_video_info.mmio_size,
"XGIfb MMIO")) {
printk(KERN_ERR "XGIfb: Fatal error: Unable to reserve MMIO region\n");
ret = -ENODEV;
goto error_0;
}
xgi_video_info.video_vbase = XGIhw_ext.pjVideoMemoryAddress =
ioremap(xgi_video_info.video_base, xgi_video_info.video_size);
xgi_video_info.mmio_vbase = ioremap(xgi_video_info.mmio_base,
xgi_video_info.mmio_size);
printk(KERN_INFO "XGIfb: Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
xgi_video_info.video_base,
xgi_video_info.video_vbase,
xgi_video_info.video_size / 1024);
printk(KERN_INFO "XGIfb: MMIO at 0x%lx, mapped to 0x%p, size %ldk\n",
xgi_video_info.mmio_base, xgi_video_info.mmio_vbase,
xgi_video_info.mmio_size / 1024);
printk("XGIfb: XGIInitNew() ...");
if (XGIInitNew(&XGIhw_ext))
printk("OK\n");
else
printk("Fail\n");
xgi_video_info.mtrr = (unsigned int) 0;
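/* Detect the video bridge and CRT2 output device and set up the default display mode. */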
if ((xgifb_mode_idx < 0) ||
((XGIbios_mode[xgifb_mode_idx].mode_no) != 0xFF)) {
xgi_video_info.hasVB = HASVB_NONE;
if ((xgi_video_info.chip == XG20) ||
(xgi_video_info.chip == XG27)) {
xgi_video_info.hasVB = HASVB_NONE;
} else if (xgi_video_info.chip == XG21) {
CR38 = xgifb_reg_get(XGICR, 0x38);
if ((CR38&0xE0) == 0xC0) {
xgi_video_info.disp_state = DISPTYPE_LCD;
if (!XGIfb_GetXG21LVDSData()) {
int m;
for (m = 0; m < sizeof(XGI21_LCDCapList)/sizeof(struct XGI21_LVDSCapStruct); m++) {
if ((XGI21_LCDCapList[m].LVDSHDE == XGIbios_mode[xgifb_mode_idx].xres) &&
(XGI21_LCDCapList[m].LVDSVDE == XGIbios_mode[xgifb_mode_idx].yres)) {
xgifb_reg_set(XGI_Pr.P3d4, 0x36, m);
}
}
}
} else if ((CR38&0xE0) == 0x60) {
xgi_video_info.hasVB = HASVB_CHRONTEL;
} else {
xgi_video_info.hasVB = HASVB_NONE;
}
} else {
XGIfb_get_VB_type();
}
XGIhw_ext.ujVBChipID = VB_CHIP_UNKNOWN;
XGIhw_ext.ulExternalChip = 0;
switch (xgi_video_info.hasVB) {
case HASVB_301:
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
XGIhw_ext.ujVBChipID = VB_CHIP_302LV;
printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
XGIhw_ext.ujVBChipID = VB_CHIP_301LV;
printk(KERN_INFO "XGIfb: XGI301LV bridge detected (revision 0x%02x)\n", reg);
}
/* else if (reg >= 0xB0) {
XGIhw_ext.ujVBChipID = VB_CHIP_301B;
reg1 = xgifb_reg_get(XGIPART4, 0x23);
printk("XGIfb: XGI301B bridge detected\n");
} */
else {
XGIhw_ext.ujVBChipID = VB_CHIP_301;
printk("XGIfb: XGI301 bridge detected\n");
}
break;
case HASVB_302:
reg = xgifb_reg_get(XGIPART4, 0x01);
if (reg >= 0xE0) {
XGIhw_ext.ujVBChipID = VB_CHIP_302LV;
printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xD0) {
XGIhw_ext.ujVBChipID = VB_CHIP_301LV;
printk(KERN_INFO "XGIfb: XGI302LV bridge detected (revision 0x%02x)\n", reg);
} else if (reg >= 0xB0) {
reg1 = xgifb_reg_get(XGIPART4, 0x23);
XGIhw_ext.ujVBChipID = VB_CHIP_302B;
} else {
XGIhw_ext.ujVBChipID = VB_CHIP_302;
printk(KERN_INFO "XGIfb: XGI302 bridge detected\n");
}
break;
case HASVB_LVDS:
XGIhw_ext.ulExternalChip = 0x1;
printk(KERN_INFO "XGIfb: LVDS transmitter detected\n");
break;
case HASVB_TRUMPION:
XGIhw_ext.ulExternalChip = 0x2;
printk(KERN_INFO "XGIfb: Trumpion Zurac LVDS scaler detected\n");
break;
case HASVB_CHRONTEL:
XGIhw_ext.ulExternalChip = 0x4;
printk(KERN_INFO "XGIfb: Chrontel TV encoder detected\n");
break;
case HASVB_LVDS_CHRONTEL:
XGIhw_ext.ulExternalChip = 0x5;
printk(KERN_INFO "XGIfb: LVDS transmitter and Chrontel TV encoder detected\n");
break;
default:
printk(KERN_INFO "XGIfb: No or unknown bridge type detected\n");
break;
}
if (xgi_video_info.hasVB != HASVB_NONE)
XGIfb_detect_VB();
if (xgi_video_info.disp_state & DISPTYPE_DISP2) {
if (XGIfb_crt1off)
xgi_video_info.disp_state |= DISPMODE_SINGLE;
else
xgi_video_info.disp_state |= (DISPMODE_MIRROR |
DISPTYPE_CRT1);
} else {
xgi_video_info.disp_state = DISPMODE_SINGLE |
DISPTYPE_CRT1;
}
if (xgi_video_info.disp_state & DISPTYPE_LCD) {
if (!enable_dstn) {
reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL);
reg &= 0x0f;
XGIhw_ext.ulCRT2LCDType = XGI310paneltype[reg];
} else {
/* TW: FSTN/DSTN */
XGIhw_ext.ulCRT2LCDType = LCD_320x480;
}
}
XGIfb_detectedpdc = 0;
XGIfb_detectedlcda = 0xff;
/* TW: Try to find about LCDA */
if ((XGIhw_ext.ujVBChipID == VB_CHIP_302B) ||
(XGIhw_ext.ujVBChipID == VB_CHIP_301LV) ||
(XGIhw_ext.ujVBChipID == VB_CHIP_302LV)) {
int tmp;
tmp = xgifb_reg_get(XGICR, 0x34);
if (tmp <= 0x13) {
/* Currently on LCDA?
*(Some BIOSes leave CR38) */
tmp = xgifb_reg_get(XGICR, 0x38);
if ((tmp & 0x03) == 0x03) {
/* XGI_Pr.XGI_UseLCDA = 1; */
} else {
/* Currently on LCDA?
*(Some newer BIOSes set D0 in CR35) */
tmp = xgifb_reg_get(XGICR, 0x35);
if (tmp & 0x01) {
/* XGI_Pr.XGI_UseLCDA = 1; */
} else {
tmp = xgifb_reg_get(XGICR,
0x30);
if (tmp & 0x20) {
tmp = xgifb_reg_get(
XGIPART1, 0x13);
if (tmp & 0x04) {
/* XGI_Pr.XGI_UseLCDA = 1; */
}
}
}
}
}
}
if (xgifb_mode_idx >= 0)
xgifb_mode_idx = XGIfb_validate_mode(xgifb_mode_idx);
if (xgifb_mode_idx < 0) {
switch (xgi_video_info.disp_state & DISPTYPE_DISP2) {
case DISPTYPE_LCD:
xgifb_mode_idx = DEFAULT_LCDMODE;
if (xgi_video_info.chip == XG21)
xgifb_mode_idx =
XGIfb_GetXG21DefaultLVDSModeIdx();
break;
case DISPTYPE_TV:
xgifb_mode_idx = DEFAULT_TVMODE;
break;
default:
xgifb_mode_idx = DEFAULT_MODE;
break;
}
}
XGIfb_mode_no = XGIbios_mode[xgifb_mode_idx].mode_no;
/* yilin set default refresh rate */
if (xgi_video_info.refresh_rate == 0)
xgi_video_info.refresh_rate = 60;
if (XGIfb_search_refresh_rate(
xgi_video_info.refresh_rate) == 0) {
XGIfb_rate_idx = XGIbios_mode[xgifb_mode_idx].rate_idx;
xgi_video_info.refresh_rate = 60;
}
xgi_video_info.video_bpp = XGIbios_mode[xgifb_mode_idx].bpp;
xgi_video_info.video_vwidth =
xgi_video_info.video_width =
XGIbios_mode[xgifb_mode_idx].xres;
xgi_video_info.video_vheight =
xgi_video_info.video_height =
XGIbios_mode[xgifb_mode_idx].yres;
xgi_video_info.org_x = xgi_video_info.org_y = 0;
xgi_video_info.video_linelength =
xgi_video_info.video_width *
(xgi_video_info.video_bpp >> 3);
switch (xgi_video_info.video_bpp) {
case 8:
xgi_video_info.DstColor = 0x0000;
xgi_video_info.XGI310_AccelDepth = 0x00000000;
xgi_video_info.video_cmap_len = 256;
break;
case 16:
xgi_video_info.DstColor = 0x8000;
xgi_video_info.XGI310_AccelDepth = 0x00010000;
xgi_video_info.video_cmap_len = 16;
break;
case 32:
xgi_video_info.DstColor = 0xC000;
xgi_video_info.XGI310_AccelDepth = 0x00020000;
xgi_video_info.video_cmap_len = 16;
break;
default:
xgi_video_info.video_cmap_len = 16;
printk(KERN_INFO "XGIfb: Unsupported depth %d",
xgi_video_info.video_bpp);
break;
}
printk(KERN_INFO "XGIfb: Default mode is %dx%dx%d (%dHz)\n",
xgi_video_info.video_width,
xgi_video_info.video_height,
xgi_video_info.video_bpp,
xgi_video_info.refresh_rate);
default_var.xres =
default_var.xres_virtual =
xgi_video_info.video_width;
default_var.yres =
default_var.yres_virtual =
xgi_video_info.video_height;
default_var.bits_per_pixel = xgi_video_info.video_bpp;
XGIfb_bpp_to_var(&default_var);
default_var.pixclock = (u32) (1000000000 /
XGIfb_mode_rate_to_dclock(&XGI_Pr, &XGIhw_ext,
XGIfb_mode_no, XGIfb_rate_idx));
if (XGIfb_mode_rate_to_ddata(&XGI_Pr, &XGIhw_ext,
XGIfb_mode_no, XGIfb_rate_idx,
&default_var.left_margin, &default_var.right_margin,
&default_var.upper_margin, &default_var.lower_margin,
&default_var.hsync_len, &default_var.vsync_len,
&default_var.sync, &default_var.vmode)) {
if ((default_var.vmode & FB_VMODE_MASK) ==
FB_VMODE_INTERLACED) {
default_var.yres <<= 1;
default_var.yres_virtual <<= 1;
} else if ((default_var.vmode & FB_VMODE_MASK) ==
FB_VMODE_DOUBLE) {
default_var.pixclock >>= 1;
default_var.yres >>= 1;
default_var.yres_virtual >>= 1;
}
}
fb_info->flags = FBINFO_FLAG_DEFAULT;
fb_info->var = default_var;
fb_info->fix = XGIfb_fix;
fb_info->par = &xgi_video_info;
fb_info->screen_base = xgi_video_info.video_vbase;
fb_info->fbops = &XGIfb_ops;
XGIfb_get_fix(&fb_info->fix, -1, fb_info);
fb_info->pseudo_palette = pseudo_palette;
fb_alloc_cmap(&fb_info->cmap, 256, 0);
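/*
* Where MTRRs are available, mark the framebuffer aperture as
* write-combining to speed up CPU writes to video memory.
*/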
#ifdef CONFIG_MTRR
{
int err;
err = mtrr_add((unsigned int) xgi_video_info.video_base,
(unsigned int) xgi_video_info.video_size,
MTRR_TYPE_WRCOMB, 1);
/* mtrr_add() returns a register number on success, negative on error */
if (err >= 0) {
xgi_video_info.mtrr = err;
printk(KERN_INFO "XGIfb: Added MTRRs\n");
}
}
#endif
if (register_framebuffer(fb_info) < 0) {
ret = -EINVAL;
goto error_1;
}
XGIfb_registered = 1;
printk(KERN_INFO "fb%d: %s frame buffer device, Version %d.%d.%02d\n",
fb_info->node, myid, VER_MAJOR, VER_MINOR, VER_LEVEL);
}
dumpVGAReg();
return 0;
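/* Error unwind: release resources in the reverse order they were acquired. */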
error_1:
iounmap(xgi_video_info.mmio_vbase);
iounmap(xgi_video_info.video_vbase);
release_mem_region(xgi_video_info.mmio_base, xgi_video_info.mmio_size);
error_0:
release_mem_region(xgi_video_info.video_base,
xgi_video_info.video_size);
error:
vfree(XGIhw_ext.pjVirtualRomBase);
framebuffer_release(fb_info);
return ret;
}
/*****************************************************/
/* PCI DEVICE HANDLING */
/*****************************************************/
static void __devexit xgifb_remove(struct pci_dev *pdev)
{
unregister_framebuffer(fb_info);
iounmap(xgi_video_info.mmio_vbase);
iounmap(xgi_video_info.video_vbase);
release_mem_region(xgi_video_info.mmio_base, xgi_video_info.mmio_size);
release_mem_region(xgi_video_info.video_base,
xgi_video_info.video_size);
vfree(XGIhw_ext.pjVirtualRomBase);
framebuffer_release(fb_info);
pci_set_drvdata(pdev, NULL);
}
static struct pci_driver xgifb_driver = {
.name = "xgifb",
.id_table = xgifb_pci_table,
.probe = xgifb_probe,
.remove = __devexit_p(xgifb_remove)
};
XGIINITSTATIC int __init xgifb_init(void)
{
char *option = NULL;
if (fb_get_options("xgifb", &option))
return -ENODEV;
XGIfb_setup(option);
return pci_register_driver(&xgifb_driver);
}
#ifndef MODULE
module_init(xgifb_init);
#endif
/*****************************************************/
/* MODULE */
/*****************************************************/
#ifdef MODULE
static char *mode = NULL;
static int vesa = 0;
static unsigned int rate = 0;
static unsigned int mem = 0;
static char *forcecrt2type = NULL;
static int forcecrt1 = -1;
static int pdc = -1;
static int pdc1 = -1;
static int noypan = -1;
static int userom = -1;
static int useoem = -1;
static char *tvstandard = NULL;
static int nocrt2rate = 0;
static int scalelcd = -1;
static char *specialtiming = NULL;
static int lvdshl = -1;
static int tvxposoffset = 0, tvyposoffset = 0;
#if !defined(__i386__) && !defined(__x86_64__)
static int resetcard = 0;
static int videoram = 0;
#endif
MODULE_DESCRIPTION("Z7 Z9 Z9S Z11 framebuffer device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("XGITECH , Others");
module_param(mem, int, 0);
module_param(noypan, int, 0);
module_param(userom, int, 0);
module_param(useoem, int, 0);
module_param(mode, charp, 0);
module_param(vesa, int, 0);
module_param(rate, int, 0);
module_param(forcecrt1, int, 0);
module_param(forcecrt2type, charp, 0);
module_param(scalelcd, int, 0);
module_param(pdc, int, 0);
module_param(pdc1, int, 0);
module_param(specialtiming, charp, 0);
module_param(lvdshl, int, 0);
module_param(tvstandard, charp, 0);
module_param(tvxposoffset, int, 0);
module_param(tvyposoffset, int, 0);
module_param(filter, int, 0);
module_param(nocrt2rate, int, 0);
#if !defined(__i386__) && !defined(__x86_64__)
module_param(resetcard, int, 0);
module_param(videoram, int, 0);
#endif
MODULE_PARM_DESC(noypan,
"\nIf set to anything other than 0, y-panning will be disabled and scrolling\n"
"will be performed by redrawing the screen. (default: 0)\n");
MODULE_PARM_DESC(mode,
"\nSelects the desired default display mode in the format XxYxDepth,\n"
"eg. 1024x768x16. Other formats supported include XxY-Depth and\n"
"XxY-Depth@Rate. If the parameter is only one (decimal or hexadecimal)\n"
"number, it will be interpreted as a VESA mode number. (default: 800x600x8)\n");
MODULE_PARM_DESC(vesa,
"\nSelects the desired default display mode by VESA defined mode number, eg.\n"
"0x117 (default: 0x0103)\n");
MODULE_PARM_DESC(rate,
"\nSelects the desired vertical refresh rate for CRT1 (external VGA) in Hz.\n"
"If the mode is specified in the format XxY-Depth@Rate, this parameter\n"
"will be ignored (default: 60)\n");
MODULE_PARM_DESC(forcecrt1,
"\nNormally, the driver autodetects whether or not CRT1 (external VGA) is\n"
"connected. With this option, the detection can be overridden (1=CRT1 ON,\n"
"0=CRT1 OFF) (default: [autodetected])\n");
MODULE_PARM_DESC(forcecrt2type,
"\nIf this option is omitted, the driver autodetects CRT2 output devices, such as\n"
"LCD, TV or secondary VGA. With this option, this autodetection can be\n"
"overridden. Possible parameters are LCD, TV, VGA or NONE. NONE disables CRT2.\n"
"On systems with a SiS video bridge, parameters SVIDEO, COMPOSITE or SCART can\n"
"be used instead of TV to override the TV detection. Furthermore, on systems\n"
"with a SiS video bridge, SVIDEO+COMPOSITE, HIVISION, YPBPR480I, YPBPR480P,\n"
"YPBPR720P and YPBPR1080I are understood. However, whether or not these work\n"
"depends on the very hardware in use. (default: [autodetected])\n");
MODULE_PARM_DESC(scalelcd,
"\nSetting this to 1 will force the driver to scale the LCD image to the panel's\n"
"native resolution. Setting it to 0 will disable scaling; LVDS panels will\n"
"show black bars around the image, TMDS panels will probably do the scaling\n"
"themselves. Default: 1 on LVDS panels, 0 on TMDS panels\n");
MODULE_PARM_DESC(pdc,
"\nThis is for manually selecting the LCD panel delay compensation. The driver\n"
"should detect this correctly in most cases; however, sometimes this is not\n"
"possible. If you see 'small waves' on the LCD, try setting this to 4, 32 or 24\n"
"on a 300 series chipset; 6 on a 315 series chipset. If the problem persists,\n"
"try other values (on 300 series: between 4 and 60 in steps of 4; on 315 series:\n"
"any value from 0 to 31). (default: autodetected, if LCD is active during start)\n");
MODULE_PARM_DESC(pdc1,
"\nThis is same as pdc, but for LCD-via CRT1. Hence, this is for the 315/330\n"
"series only. (default: autodetected if LCD is in LCD-via-CRT1 mode during\n"
"startup) - Note: currently, this has no effect because LCD-via-CRT1 is not\n"
"implemented yet.\n");
MODULE_PARM_DESC(specialtiming,
"\nPlease refer to documentation for more information on this option.\n");
MODULE_PARM_DESC(lvdshl,
"\nPlease refer to documentation for more information on this option.\n");
MODULE_PARM_DESC(tvstandard,
"\nThis allows overriding the BIOS default for the TV standard. Valid choices are\n"
"pal, ntsc, palm and paln. (default: [auto; pal or ntsc only])\n");
MODULE_PARM_DESC(tvxposoffset,
"\nRelocate TV output horizontally. Possible parameters: -32 through 32.\n"
"Default: 0\n");
MODULE_PARM_DESC(tvyposoffset,
"\nRelocate TV output vertically. Possible parameters: -32 through 32.\n"
"Default: 0\n");
MODULE_PARM_DESC(filter,
"\nSelects TV flicker filter type (only for systems with a SiS301 video bridge).\n"
"(Possible values 0-7, default: [no filter])\n");
MODULE_PARM_DESC(nocrt2rate,
"\nSetting this to 1 will force the driver to use the default refresh rate for\n"
"CRT2 if CRT2 type is VGA. (default: 0, use same rate as CRT1)\n");
static int __init xgifb_init_module(void)
{
printk("\nXGIfb_init_module");
if (mode)
XGIfb_search_mode(mode);
else if (vesa != -1)
XGIfb_search_vesamode(vesa);
return xgifb_init();
}
static void __exit xgifb_remove_module(void)
{
pci_unregister_driver(&xgifb_driver);
printk(KERN_DEBUG "xgifb: Module unloaded\n");
}
module_init(xgifb_init_module);
module_exit(xgifb_remove_module);
#endif /* /MODULE */
| gpl-2.0 |
Pafcholini/emotion_tw_511_COI3 | arch/arm/mach-at91/board-yl-9200.c | 2636 | 19069 | /*
* linux/arch/arm/mach-at91/board-yl-9200.c
*
* Adapted from various board files in arch/arm/mach-at91
*
* Modifications for YL-9200 platform:
* Copyright (C) 2007 S. Birtles
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
#include <mach/cpu.h>
#include "at91_aic.h"
#include "board.h"
#include "generic.h"
static void __init yl9200_init_early(void)
{
/* Set cpu type: PQFP */
at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
/* Initialize processor: 18.432 MHz crystal */
at91_initialize(18432000);
}
/*
* LEDs
*/
static struct gpio_led yl9200_leds[] = {
{ /* D2 */
.name = "led2",
.gpio = AT91_PIN_PB17,
.active_low = 1,
.default_trigger = "timer",
},
{ /* D3 */
.name = "led3",
.gpio = AT91_PIN_PB16,
.active_low = 1,
.default_trigger = "heartbeat",
},
{ /* D4 */
.name = "led4",
.gpio = AT91_PIN_PB15,
.active_low = 1,
},
{ /* D5 */
.name = "led5",
.gpio = AT91_PIN_PB8,
.active_low = 1,
}
};
/*
* Ethernet
*/
static struct macb_platform_data __initdata yl9200_eth_data = {
.phy_irq_pin = AT91_PIN_PB28,
.is_rmii = 1,
};
/*
* USB Host
*/
static struct at91_usbh_data __initdata yl9200_usbh_data = {
.ports = 1, /* PQFP version of AT91RM9200 */
.vbus_pin = {-EINVAL, -EINVAL},
.overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
* USB Device
*/
static struct at91_udc_data __initdata yl9200_udc_data = {
.pullup_pin = AT91_PIN_PC4,
.vbus_pin = AT91_PIN_PC5,
.pullup_active_low = 1, /* Active Low due to PNP transistor (pg 7) */
};
/*
* MMC
*/
static struct mci_platform_data __initdata yl9200_mci0_data = {
.slot[0] = {
.bus_width = 4,
.detect_pin = AT91_PIN_PB9,
.wp_pin = -EINVAL,
},
};
/*
* NAND Flash
*/
static struct mtd_partition __initdata yl9200_nand_partition[] = {
{
.name = "AT91 NAND partition 1, boot",
.offset = 0,
.size = SZ_256K
},
{
.name = "AT91 NAND partition 2, kernel",
.offset = MTDPART_OFS_NXTBLK,
.size = (2 * SZ_1M) - SZ_256K
},
{
.name = "AT91 NAND partition 3, filesystem",
.offset = MTDPART_OFS_NXTBLK,
.size = 14 * SZ_1M
},
{
.name = "AT91 NAND partition 4, storage",
.offset = MTDPART_OFS_NXTBLK,
.size = SZ_16M
},
{
.name = "AT91 NAND partition 5, ext-fs",
.offset = MTDPART_OFS_NXTBLK,
.size = SZ_32M
}
};
static struct atmel_nand_data __initdata yl9200_nand_data = {
.ale = 6,
.cle = 7,
.det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC14, /* R/!B (Sheet10) */
.enable_pin = AT91_PIN_PC15, /* !CE (Sheet10) */
.ecc_mode = NAND_ECC_SOFT,
.parts = yl9200_nand_partition,
.num_parts = ARRAY_SIZE(yl9200_nand_partition),
};
/*
* NOR Flash
*/
#define YL9200_FLASH_BASE AT91_CHIPSELECT_0
#define YL9200_FLASH_SIZE SZ_16M
static struct mtd_partition yl9200_flash_partitions[] = {
{
.name = "Bootloader",
.offset = 0,
.size = SZ_256K,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
{
.name = "Kernel",
.offset = MTDPART_OFS_NXTBLK,
.size = (2 * SZ_1M) - SZ_256K
},
{
.name = "Filesystem",
.offset = MTDPART_OFS_NXTBLK,
.size = MTDPART_SIZ_FULL
}
};
static struct physmap_flash_data yl9200_flash_data = {
.width = 2,
.parts = yl9200_flash_partitions,
.nr_parts = ARRAY_SIZE(yl9200_flash_partitions),
};
static struct resource yl9200_flash_resources[] = {
{
.start = YL9200_FLASH_BASE,
.end = YL9200_FLASH_BASE + YL9200_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device yl9200_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &yl9200_flash_data,
},
.resource = yl9200_flash_resources,
.num_resources = ARRAY_SIZE(yl9200_flash_resources),
};
/*
* I2C (TWI)
*/
static struct i2c_board_info __initdata yl9200_i2c_devices[] = {
{ /* EEPROM */
I2C_BOARD_INFO("24c128", 0x50),
}
};
/*
* GPIO Buttons
*/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
static struct gpio_keys_button yl9200_buttons[] = {
{
.gpio = AT91_PIN_PA24,
.code = BTN_2,
.desc = "SW2",
.active_low = 1,
.wakeup = 1,
},
{
.gpio = AT91_PIN_PB1,
.code = BTN_3,
.desc = "SW3",
.active_low = 1,
.wakeup = 1,
},
{
.gpio = AT91_PIN_PB2,
.code = BTN_4,
.desc = "SW4",
.active_low = 1,
.wakeup = 1,
},
{
.gpio = AT91_PIN_PB6,
.code = BTN_5,
.desc = "SW5",
.active_low = 1,
.wakeup = 1,
}
};
static struct gpio_keys_platform_data yl9200_button_data = {
.buttons = yl9200_buttons,
.nbuttons = ARRAY_SIZE(yl9200_buttons),
};
static struct platform_device yl9200_button_device = {
.name = "gpio-keys",
.id = -1,
.num_resources = 0,
.dev = {
.platform_data = &yl9200_button_data,
}
};
static void __init yl9200_add_device_buttons(void)
{
at91_set_gpio_input(AT91_PIN_PA24, 1); /* SW2 */
at91_set_deglitch(AT91_PIN_PA24, 1);
at91_set_gpio_input(AT91_PIN_PB1, 1); /* SW3 */
at91_set_deglitch(AT91_PIN_PB1, 1);
at91_set_gpio_input(AT91_PIN_PB2, 1); /* SW4 */
at91_set_deglitch(AT91_PIN_PB2, 1);
at91_set_gpio_input(AT91_PIN_PB6, 1); /* SW5 */
at91_set_deglitch(AT91_PIN_PB6, 1);
/* Enable buttons (Sheet 5) */
at91_set_gpio_output(AT91_PIN_PB7, 1);
platform_device_register(&yl9200_button_device);
}
#else
static void __init yl9200_add_device_buttons(void) {}
#endif
/*
* Touchscreen
*/
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
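/* The ADS7843 PENIRQ line on PB11 is active low: pen down reads back as 0. */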
static int ads7843_pendown_state(void)
{
return !at91_get_gpio_value(AT91_PIN_PB11); /* Touchscreen PENIRQ */
}
static struct ads7846_platform_data ads_info = {
.model = 7843,
.x_min = 150,
.x_max = 3830,
.y_min = 190,
.y_max = 3830,
.vref_delay_usecs = 100,
/* For a 8" touch-screen */
// .x_plate_ohms = 603,
// .y_plate_ohms = 332,
/* For a 10.4" touch-screen */
// .x_plate_ohms = 611,
// .y_plate_ohms = 325,
.x_plate_ohms = 576,
.y_plate_ohms = 366,
.pressure_max = 15000, /* generally nonsense on the 7843 */
.debounce_max = 1,
.debounce_rep = 0,
.debounce_tol = (~0),
.get_pendown_state = ads7843_pendown_state,
};
static void __init yl9200_add_device_ts(void)
{
at91_set_gpio_input(AT91_PIN_PB11, 1); /* Touchscreen interrupt pin */
at91_set_gpio_input(AT91_PIN_PB10, 1); /* Touchscreen BUSY signal - not used! */
}
#else
static void __init yl9200_add_device_ts(void) {}
#endif
/*
* SPI devices
*/
static struct spi_board_info yl9200_spi_devices[] = {
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
{ /* Touchscreen */
.modalias = "ads7846",
.chip_select = 0,
.max_speed_hz = 5000 * 26,
.platform_data = &ads_info,
.irq = AT91_PIN_PB11,
},
#endif
{ /* CAN */
.modalias = "mcp2510",
.chip_select = 1,
.max_speed_hz = 25000 * 26,
.irq = AT91_PIN_PC0,
}
};
/*
* LCD / VGA
*
* EPSON S1D13806 FB (discontinued chip)
* EPSON S1D13506 FB
*/
#if defined(CONFIG_FB_S1D13XXX) || defined(CONFIG_FB_S1D13XXX_MODULE)
#include <video/s1d13xxxfb.h>
static void yl9200_init_video(void)
{
/* NWAIT Signal */
at91_set_A_periph(AT91_PIN_PC6, 0);
/* Initialization of the Static Memory Controller for Chip Select 2 */
at91_ramc_write(0, AT91_SMC_CSR(2), AT91_SMC_DBW_16 /* 16 bit */
| AT91_SMC_WSEN | AT91_SMC_NWS_(0x4) /* wait states */
| AT91_SMC_TDF_(0x100) /* float time */
);
}
static struct s1d13xxxfb_regval yl9200_s1dfb_initregs[] =
{
{S1DREG_MISC, 0x00}, /* Miscellaneous Register*/
{S1DREG_COM_DISP_MODE, 0x01}, /* Display Mode Register, LCD only*/
{S1DREG_GPIO_CNF0, 0x00}, /* General IO Pins Configuration Register*/
{S1DREG_GPIO_CTL0, 0x00}, /* General IO Pins Control Register*/
{S1DREG_CLK_CNF, 0x11}, /* Memory Clock Configuration Register*/
{S1DREG_LCD_CLK_CNF, 0x10}, /* LCD Pixel Clock Configuration Register*/
{S1DREG_CRT_CLK_CNF, 0x12}, /* CRT/TV Pixel Clock Configuration Register*/
{S1DREG_MPLUG_CLK_CNF, 0x01}, /* MediaPlug Clock Configuration Register*/
{S1DREG_CPU2MEM_WST_SEL, 0x02}, /* CPU To Memory Wait State Select Register*/
{S1DREG_MEM_CNF, 0x00}, /* Memory Configuration Register*/
{S1DREG_SDRAM_REF_RATE, 0x04}, /* DRAM Refresh Rate Register, MCLK source*/
{S1DREG_SDRAM_TC0, 0x12}, /* DRAM Timings Control Register 0*/
{S1DREG_SDRAM_TC1, 0x02}, /* DRAM Timings Control Register 1*/
{S1DREG_PANEL_TYPE, 0x25}, /* Panel Type Register*/
{S1DREG_MOD_RATE, 0x00}, /* MOD Rate Register*/
{S1DREG_LCD_DISP_HWIDTH, 0x4F}, /* LCD Horizontal Display Width Register*/
{S1DREG_LCD_NDISP_HPER, 0x13}, /* LCD Horizontal Non-Display Period Register*/
{S1DREG_TFT_FPLINE_START, 0x01}, /* TFT FPLINE Start Position Register*/
{S1DREG_TFT_FPLINE_PWIDTH, 0x0c}, /* TFT FPLINE Pulse Width Register*/
{S1DREG_LCD_DISP_VHEIGHT0, 0xDF}, /* LCD Vertical Display Height Register 0*/
{S1DREG_LCD_DISP_VHEIGHT1, 0x01}, /* LCD Vertical Display Height Register 1*/
{S1DREG_LCD_NDISP_VPER, 0x2c}, /* LCD Vertical Non-Display Period Register*/
{S1DREG_TFT_FPFRAME_START, 0x0a}, /* TFT FPFRAME Start Position Register*/
{S1DREG_TFT_FPFRAME_PWIDTH, 0x02}, /* TFT FPFRAME Pulse Width Register*/
{S1DREG_LCD_DISP_MODE, 0x05}, /* LCD Display Mode Register*/
{S1DREG_LCD_MISC, 0x01}, /* LCD Miscellaneous Register*/
{S1DREG_LCD_DISP_START0, 0x00}, /* LCD Display Start Address Register 0*/
{S1DREG_LCD_DISP_START1, 0x00}, /* LCD Display Start Address Register 1*/
{S1DREG_LCD_DISP_START2, 0x00}, /* LCD Display Start Address Register 2*/
{S1DREG_LCD_MEM_OFF0, 0x80}, /* LCD Memory Address Offset Register 0*/
{S1DREG_LCD_MEM_OFF1, 0x02}, /* LCD Memory Address Offset Register 1*/
{S1DREG_LCD_PIX_PAN, 0x03}, /* LCD Pixel Panning Register*/
{S1DREG_LCD_DISP_FIFO_HTC, 0x00}, /* LCD Display FIFO High Threshold Control Register*/
{S1DREG_LCD_DISP_FIFO_LTC, 0x00}, /* LCD Display FIFO Low Threshold Control Register*/
{S1DREG_CRT_DISP_HWIDTH, 0x4F}, /* CRT/TV Horizontal Display Width Register*/
{S1DREG_CRT_NDISP_HPER, 0x13}, /* CRT/TV Horizontal Non-Display Period Register*/
{S1DREG_CRT_HRTC_START, 0x01}, /* CRT/TV HRTC Start Position Register*/
{S1DREG_CRT_HRTC_PWIDTH, 0x0B}, /* CRT/TV HRTC Pulse Width Register*/
{S1DREG_CRT_DISP_VHEIGHT0, 0xDF}, /* CRT/TV Vertical Display Height Register 0*/
{S1DREG_CRT_DISP_VHEIGHT1, 0x01}, /* CRT/TV Vertical Display Height Register 1*/
{S1DREG_CRT_NDISP_VPER, 0x2B}, /* CRT/TV Vertical Non-Display Period Register*/
{S1DREG_CRT_VRTC_START, 0x09}, /* CRT/TV VRTC Start Position Register*/
{S1DREG_CRT_VRTC_PWIDTH, 0x01}, /* CRT/TV VRTC Pulse Width Register*/
{S1DREG_TV_OUT_CTL, 0x18}, /* TV Output Control Register */
{S1DREG_CRT_DISP_MODE, 0x05}, /* CRT/TV Display Mode Register, 16BPP*/
{S1DREG_CRT_DISP_START0, 0x00}, /* CRT/TV Display Start Address Register 0*/
{S1DREG_CRT_DISP_START1, 0x00}, /* CRT/TV Display Start Address Register 1*/
{S1DREG_CRT_DISP_START2, 0x00}, /* CRT/TV Display Start Address Register 2*/
{S1DREG_CRT_MEM_OFF0, 0x80}, /* CRT/TV Memory Address Offset Register 0*/
{S1DREG_CRT_MEM_OFF1, 0x02}, /* CRT/TV Memory Address Offset Register 1*/
{S1DREG_CRT_PIX_PAN, 0x00}, /* CRT/TV Pixel Panning Register*/
{S1DREG_CRT_DISP_FIFO_HTC, 0x00}, /* CRT/TV Display FIFO High Threshold Control Register*/
{S1DREG_CRT_DISP_FIFO_LTC, 0x00}, /* CRT/TV Display FIFO Low Threshold Control Register*/
{S1DREG_LCD_CUR_CTL, 0x00}, /* LCD Ink/Cursor Control Register*/
{S1DREG_LCD_CUR_START, 0x01}, /* LCD Ink/Cursor Start Address Register*/
{S1DREG_LCD_CUR_XPOS0, 0x00}, /* LCD Cursor X Position Register 0*/
{S1DREG_LCD_CUR_XPOS1, 0x00}, /* LCD Cursor X Position Register 1*/
{S1DREG_LCD_CUR_YPOS0, 0x00}, /* LCD Cursor Y Position Register 0*/
{S1DREG_LCD_CUR_YPOS1, 0x00}, /* LCD Cursor Y Position Register 1*/
{S1DREG_LCD_CUR_BCTL0, 0x00}, /* LCD Ink/Cursor Blue Color 0 Register*/
{S1DREG_LCD_CUR_GCTL0, 0x00}, /* LCD Ink/Cursor Green Color 0 Register*/
{S1DREG_LCD_CUR_RCTL0, 0x00}, /* LCD Ink/Cursor Red Color 0 Register*/
{S1DREG_LCD_CUR_BCTL1, 0x1F}, /* LCD Ink/Cursor Blue Color 1 Register*/
{S1DREG_LCD_CUR_GCTL1, 0x3F}, /* LCD Ink/Cursor Green Color 1 Register*/
{S1DREG_LCD_CUR_RCTL1, 0x1F}, /* LCD Ink/Cursor Red Color 1 Register*/
{S1DREG_LCD_CUR_FIFO_HTC, 0x00}, /* LCD Ink/Cursor FIFO Threshold Register*/
{S1DREG_CRT_CUR_CTL, 0x00}, /* CRT/TV Ink/Cursor Control Register*/
{S1DREG_CRT_CUR_START, 0x01}, /* CRT/TV Ink/Cursor Start Address Register*/
{S1DREG_CRT_CUR_XPOS0, 0x00}, /* CRT/TV Cursor X Position Register 0*/
{S1DREG_CRT_CUR_XPOS1, 0x00}, /* CRT/TV Cursor X Position Register 1*/
{S1DREG_CRT_CUR_YPOS0, 0x00}, /* CRT/TV Cursor Y Position Register 0*/
{S1DREG_CRT_CUR_YPOS1, 0x00}, /* CRT/TV Cursor Y Position Register 1*/
{S1DREG_CRT_CUR_BCTL0, 0x00}, /* CRT/TV Ink/Cursor Blue Color 0 Register*/
{S1DREG_CRT_CUR_GCTL0, 0x00}, /* CRT/TV Ink/Cursor Green Color 0 Register*/
{S1DREG_CRT_CUR_RCTL0, 0x00}, /* CRT/TV Ink/Cursor Red Color 0 Register*/
{S1DREG_CRT_CUR_BCTL1, 0x1F}, /* CRT/TV Ink/Cursor Blue Color 1 Register*/
{S1DREG_CRT_CUR_GCTL1, 0x3F}, /* CRT/TV Ink/Cursor Green Color 1 Register*/
{S1DREG_CRT_CUR_RCTL1, 0x1F}, /* CRT/TV Ink/Cursor Red Color 1 Register*/
{S1DREG_CRT_CUR_FIFO_HTC, 0x00}, /* CRT/TV Ink/Cursor FIFO Threshold Register*/
{S1DREG_BBLT_CTL0, 0x00}, /* BitBlt Control Register 0*/
{S1DREG_BBLT_CTL1, 0x01}, /* BitBlt Control Register 1*/
{S1DREG_BBLT_CC_EXP, 0x00}, /* BitBlt ROP Code/Color Expansion Register*/
{S1DREG_BBLT_OP, 0x00}, /* BitBlt Operation Register*/
{S1DREG_BBLT_SRC_START0, 0x00}, /* BitBlt Source Start Address Register 0*/
{S1DREG_BBLT_SRC_START1, 0x00}, /* BitBlt Source Start Address Register 1*/
{S1DREG_BBLT_SRC_START2, 0x00}, /* BitBlt Source Start Address Register 2*/
{S1DREG_BBLT_DST_START0, 0x00}, /* BitBlt Destination Start Address Register 0*/
{S1DREG_BBLT_DST_START1, 0x00}, /* BitBlt Destination Start Address Register 1*/
{S1DREG_BBLT_DST_START2, 0x00}, /* BitBlt Destination Start Address Register 2*/
{S1DREG_BBLT_MEM_OFF0, 0x00}, /* BitBlt Memory Address Offset Register 0*/
{S1DREG_BBLT_MEM_OFF1, 0x00}, /* BitBlt Memory Address Offset Register 1*/
{S1DREG_BBLT_WIDTH0, 0x00}, /* BitBlt Width Register 0*/
{S1DREG_BBLT_WIDTH1, 0x00}, /* BitBlt Width Register 1*/
{S1DREG_BBLT_HEIGHT0, 0x00}, /* BitBlt Height Register 0*/
{S1DREG_BBLT_HEIGHT1, 0x00}, /* BitBlt Height Register 1*/
{S1DREG_BBLT_BGC0, 0x00}, /* BitBlt Background Color Register 0*/
{S1DREG_BBLT_BGC1, 0x00}, /* BitBlt Background Color Register 1*/
{S1DREG_BBLT_FGC0, 0x00}, /* BitBlt Foreground Color Register 0*/
{S1DREG_BBLT_FGC1, 0x00}, /* BitBlt Foreground Color Register 1*/
{S1DREG_LKUP_MODE, 0x00}, /* Look-Up Table Mode Register*/
{S1DREG_LKUP_ADDR, 0x00}, /* Look-Up Table Address Register*/
{S1DREG_PS_CNF, 0x00}, /* Power Save Configuration Register*/
{S1DREG_PS_STATUS, 0x00}, /* Power Save Status Register*/
{S1DREG_CPU2MEM_WDOGT, 0x00}, /* CPU-to-Memory Access Watchdog Timer Register*/
{S1DREG_COM_DISP_MODE, 0x01}, /* Display Mode Register, LCD only*/
};
static struct s1d13xxxfb_pdata yl9200_s1dfb_pdata = {
.initregs = yl9200_s1dfb_initregs,
.initregssize = ARRAY_SIZE(yl9200_s1dfb_initregs),
.platform_init_video = yl9200_init_video,
};
#define YL9200_FB_REG_BASE AT91_CHIPSELECT_7
#define YL9200_FB_VMEM_BASE (YL9200_FB_REG_BASE + SZ_2M)
#define YL9200_FB_VMEM_SIZE SZ_2M
static struct resource yl9200_s1dfb_resource[] = {
[0] = { /* video mem */
.name = "s1d13xxxfb memory",
.start = YL9200_FB_VMEM_BASE,
.end = YL9200_FB_VMEM_BASE + YL9200_FB_VMEM_SIZE -1,
.flags = IORESOURCE_MEM,
},
[1] = { /* video registers */
.name = "s1d13xxxfb registers",
.start = YL9200_FB_REG_BASE,
.end = YL9200_FB_REG_BASE + SZ_512 -1,
.flags = IORESOURCE_MEM,
},
};
static u64 s1dfb_dmamask = DMA_BIT_MASK(32);
static struct platform_device yl9200_s1dfb_device = {
.name = "s1d13806fb",
.id = -1,
.dev = {
.dma_mask = &s1dfb_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &yl9200_s1dfb_pdata,
},
.resource = yl9200_s1dfb_resource,
.num_resources = ARRAY_SIZE(yl9200_s1dfb_resource),
};
void __init yl9200_add_device_video(void)
{
platform_device_register(&yl9200_s1dfb_device);
}
#else
void __init yl9200_add_device_video(void) {}
#endif
static void __init yl9200_board_init(void)
{
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART1 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
| ATMEL_UART_RI);
/* USART0 on ttyS2. (Rx & Tx only to JP3) */
at91_register_uart(AT91RM9200_ID_US0, 2, 0);
/* USART3 on ttyS3. (Rx, Tx, RTS - RS485 interface) */
at91_register_uart(AT91RM9200_ID_US3, 3, ATMEL_UART_RTS);
at91_add_device_serial();
/* Ethernet */
at91_add_device_eth(&yl9200_eth_data);
/* USB Host */
at91_add_device_usbh(&yl9200_usbh_data);
/* USB Device */
at91_add_device_udc(&yl9200_udc_data);
/* I2C */
at91_add_device_i2c(yl9200_i2c_devices, ARRAY_SIZE(yl9200_i2c_devices));
/* MMC */
at91_add_device_mci(0, &yl9200_mci0_data);
/* NAND */
at91_add_device_nand(&yl9200_nand_data);
/* NOR Flash */
platform_device_register(&yl9200_flash);
#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
/* SPI */
at91_add_device_spi(yl9200_spi_devices, ARRAY_SIZE(yl9200_spi_devices));
/* Touchscreen */
yl9200_add_device_ts();
#endif
/* LEDs. */
at91_gpio_leds(yl9200_leds, ARRAY_SIZE(yl9200_leds));
/* Push Buttons */
yl9200_add_device_buttons();
/* VGA */
yl9200_add_device_video();
}
MACHINE_START(YL9200, "uCdragon YL-9200")
/* Maintainer: S.Birtles */
.init_time = at91rm9200_timer_init,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = yl9200_init_early,
.init_irq = at91_init_irq_default,
.init_machine = yl9200_board_init,
MACHINE_END
| gpl-2.0 |
metacloud/linux | arch/arm/mach-at91/board-1arm.c | 2636 | 2806 | /*
* linux/arch/arm/mach-at91/board-1arm.c
*
* Copyright (C) 2005 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/cpu.h>
#include "at91_aic.h"
#include "board.h"
#include "generic.h"
static void __init onearm_init_early(void)
{
/* Set cpu type: PQFP */
at91rm9200_set_type(ARCH_REVISON_9200_PQFP);
/* Initialize processor: 18.432 MHz crystal */
at91_initialize(18432000);
}
static struct macb_platform_data __initdata onearm_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata onearm_usbh_data = {
.ports = 1,
.vbus_pin = {-EINVAL, -EINVAL},
.overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata onearm_udc_data = {
.vbus_pin = AT91_PIN_PC2,
.pullup_pin = AT91_PIN_PC3,
};
static void __init onearm_board_init(void)
{
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */
at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS);
/* USART1 on ttyS2 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91RM9200_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
| ATMEL_UART_RI);
at91_add_device_serial();
/* Ethernet */
at91_add_device_eth(&onearm_eth_data);
/* USB Host */
at91_add_device_usbh(&onearm_usbh_data);
/* USB Device */
at91_add_device_udc(&onearm_udc_data);
}
MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
.init_time = at91rm9200_timer_init,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = onearm_init_early,
.init_irq = at91_init_irq_default,
.init_machine = onearm_board_init,
MACHINE_END
| gpl-2.0 |
imoseyon/leanKernel-angler | arch/arm/mach-at91/board-picotux200.c | 2636 | 3578 | /*
* linux/arch/arm/mach-at91/board-picotux200.c
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2007 Kleinhenz Elektronik GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/mtd/physmap.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
#include "at91_aic.h"
#include "board.h"
#include "generic.h"
static void __init picotux200_init_early(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91_initialize(18432000);
}
static struct macb_platform_data __initdata picotux200_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata picotux200_usbh_data = {
.ports = 1,
.vbus_pin = {-EINVAL, -EINVAL},
.overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct mci_platform_data __initdata picotux200_mci0_data = {
.slot[0] = {
.bus_width = 4,
.detect_pin = AT91_PIN_PB27,
.wp_pin = AT91_PIN_PA17,
},
};
#define PICOTUX200_FLASH_BASE AT91_CHIPSELECT_0
#define PICOTUX200_FLASH_SIZE SZ_4M
static struct physmap_flash_data picotux200_flash_data = {
.width = 2,
};
static struct resource picotux200_flash_resource = {
.start = PICOTUX200_FLASH_BASE,
.end = PICOTUX200_FLASH_BASE + PICOTUX200_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device picotux200_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &picotux200_flash_data,
},
.resource = &picotux200_flash_resource,
.num_resources = 1,
};
static void __init picotux200_board_init(void)
{
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART1 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
| ATMEL_UART_RI);
at91_add_device_serial();
/* Ethernet */
at91_add_device_eth(&picotux200_eth_data);
/* USB Host */
at91_add_device_usbh(&picotux200_usbh_data);
/* I2C */
at91_add_device_i2c(NULL, 0);
/* MMC */
at91_set_gpio_output(AT91_PIN_PB22, 1); /* this MMC card slot can optionally use SPI signaling (CS3). */
at91_add_device_mci(0, &picotux200_mci0_data);
/* NOR Flash */
platform_device_register(&picotux200_flash);
}
MACHINE_START(PICOTUX2XX, "picotux 200")
/* Maintainer: Kleinhenz Elektronik GmbH */
.init_time = at91rm9200_timer_init,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = picotux200_init_early,
.init_irq = at91_init_irq_default,
.init_machine = picotux200_board_init,
MACHINE_END
| gpl-2.0 |
CyanHacker-Lollipop/kernel_htc_msm8974 | sound/soc/sh/siu_pcm.c | 3660 | 16798 | /*
* siu_pcm.c - ALSA driver for Renesas SH7343, SH7722 SIU peripheral.
*
* Copyright (C) 2009-2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
* Copyright (C) 2006 Carlos Munoz <carlos@kenati.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/siu.h>
#include "siu.h"
#define GET_MAX_PERIODS(buf_bytes, period_bytes) \
((buf_bytes) / (period_bytes))
#define PERIOD_OFFSET(buf_addr, period_num, period_bytes) \
((buf_addr) + ((period_num) * (period_bytes)))
#define RWF_STM_RD 0x01 /* Read in progress */
#define RWF_STM_WT 0x02 /* Write in progress */
struct siu_port *siu_ports[SIU_PORT_NUM];
/* transfersize is number of u32 dma transfers per period */
static int siu_pcm_stmwrite_stop(struct siu_port *port_info)
{
struct siu_info *info = siu_i2s_data;
u32 __iomem *base = info->reg;
struct siu_stream *siu_stream = &port_info->playback;
u32 stfifo;
if (!siu_stream->rw_flg)
return -EPERM;
/* output FIFO disable */
stfifo = siu_read32(base + SIU_STFIFO);
siu_write32(base + SIU_STFIFO, stfifo & ~0x0c180c18);
pr_debug("%s: STFIFO %x -> %x\n", __func__,
stfifo, stfifo & ~0x0c180c18);
/* during stmwrite clear */
siu_stream->rw_flg = 0;
return 0;
}
static int siu_pcm_stmwrite_start(struct siu_port *port_info)
{
struct siu_stream *siu_stream = &port_info->playback;
if (siu_stream->rw_flg)
return -EPERM;
/* Current period in buffer */
port_info->playback.cur_period = 0;
/* during stmwrite flag set */
siu_stream->rw_flg = RWF_STM_WT;
/* DMA transfer start */
tasklet_schedule(&siu_stream->tasklet);
return 0;
}
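/*
* DMA completion callback: advance to the next period, reschedule the
* I/O tasklet to queue the following transfer and notify ALSA that a
* period has elapsed.
*/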
static void siu_dma_tx_complete(void *arg)
{
struct siu_stream *siu_stream = arg;
if (!siu_stream->rw_flg)
return;
/* Update completed period count */
if (++siu_stream->cur_period >=
GET_MAX_PERIODS(siu_stream->buf_bytes,
siu_stream->period_bytes))
siu_stream->cur_period = 0;
pr_debug("%s: done period #%d (%u/%u bytes), cookie %d\n",
__func__, siu_stream->cur_period,
siu_stream->cur_period * siu_stream->period_bytes,
siu_stream->buf_bytes, siu_stream->cookie);
tasklet_schedule(&siu_stream->tasklet);
/* Notify alsa: a period is done */
snd_pcm_period_elapsed(siu_stream->substream);
}
static int siu_pcm_wr_set(struct siu_port *port_info,
dma_addr_t buff, u32 size)
{
struct siu_info *info = siu_i2s_data;
u32 __iomem *base = info->reg;
struct siu_stream *siu_stream = &port_info->playback;
struct snd_pcm_substream *substream = siu_stream->substream;
struct device *dev = substream->pcm->card->dev;
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
struct scatterlist sg;
u32 stfifo;
sg_init_table(&sg, 1);
sg_set_page(&sg, pfn_to_page(PFN_DOWN(buff)),
size, offset_in_page(buff));
sg_dma_len(&sg) = size;
sg_dma_address(&sg) = buff;
desc = dmaengine_prep_slave_sg(siu_stream->chan,
&sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(dev, "Failed to allocate a dma descriptor\n");
return -ENOMEM;
}
desc->callback = siu_dma_tx_complete;
desc->callback_param = siu_stream;
cookie = desc->tx_submit(desc);
if (cookie < 0) {
dev_err(dev, "Failed to submit a dma transfer\n");
return cookie;
}
siu_stream->tx_desc = desc;
siu_stream->cookie = cookie;
dma_async_issue_pending(siu_stream->chan);
/* only output FIFO enable */
stfifo = siu_read32(base + SIU_STFIFO);
siu_write32(base + SIU_STFIFO, stfifo | (port_info->stfifo & 0x0c180c18));
dev_dbg(dev, "%s: STFIFO %x -> %x\n", __func__,
stfifo, stfifo | (port_info->stfifo & 0x0c180c18));
return 0;
}
static int siu_pcm_rd_set(struct siu_port *port_info,
dma_addr_t buff, size_t size)
{
struct siu_info *info = siu_i2s_data;
u32 __iomem *base = info->reg;
struct siu_stream *siu_stream = &port_info->capture;
struct snd_pcm_substream *substream = siu_stream->substream;
struct device *dev = substream->pcm->card->dev;
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
struct scatterlist sg;
u32 stfifo;
dev_dbg(dev, "%s: %u@%llx\n", __func__, size, (unsigned long long)buff);
sg_init_table(&sg, 1);
sg_set_page(&sg, pfn_to_page(PFN_DOWN(buff)),
size, offset_in_page(buff));
sg_dma_len(&sg) = size;
sg_dma_address(&sg) = buff;
desc = dmaengine_prep_slave_sg(siu_stream->chan,
&sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(dev, "Failed to allocate dma descriptor\n");
return -ENOMEM;
}
desc->callback = siu_dma_tx_complete;
desc->callback_param = siu_stream;
cookie = desc->tx_submit(desc);
if (cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
return cookie;
}
siu_stream->tx_desc = desc;
siu_stream->cookie = cookie;
dma_async_issue_pending(siu_stream->chan);
/* only input FIFO enable */
stfifo = siu_read32(base + SIU_STFIFO);
siu_write32(base + SIU_STFIFO, siu_read32(base + SIU_STFIFO) |
(port_info->stfifo & 0x13071307));
dev_dbg(dev, "%s: STFIFO %x -> %x\n", __func__,
stfifo, stfifo | (port_info->stfifo & 0x13071307));
return 0;
}
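/*
* I/O tasklet: submit the DMA transfer for the current period of the
* playback or capture stream.
*/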
static void siu_io_tasklet(unsigned long data)
{
struct siu_stream *siu_stream = (struct siu_stream *)data;
struct snd_pcm_substream *substream = siu_stream->substream;
struct device *dev = substream->pcm->card->dev;
struct snd_pcm_runtime *rt = substream->runtime;
struct siu_port *port_info = siu_port_info(substream);
dev_dbg(dev, "%s: flags %x\n", __func__, siu_stream->rw_flg);
if (!siu_stream->rw_flg) {
dev_dbg(dev, "%s: stream inactive\n", __func__);
return;
}
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
dma_addr_t buff;
size_t count;
u8 *virt;
buff = (dma_addr_t)PERIOD_OFFSET(rt->dma_addr,
siu_stream->cur_period,
siu_stream->period_bytes);
virt = PERIOD_OFFSET(rt->dma_area,
siu_stream->cur_period,
siu_stream->period_bytes);
count = siu_stream->period_bytes;
/* DMA transfer start */
siu_pcm_rd_set(port_info, buff, count);
} else {
siu_pcm_wr_set(port_info,
(dma_addr_t)PERIOD_OFFSET(rt->dma_addr,
siu_stream->cur_period,
siu_stream->period_bytes),
siu_stream->period_bytes);
}
}
/* Capture */
static int siu_pcm_stmread_start(struct siu_port *port_info)
{
struct siu_stream *siu_stream = &port_info->capture;
if (siu_stream->xfer_cnt > 0x1000000)
return -EINVAL;
if (siu_stream->rw_flg)
return -EPERM;
/* Current period in buffer */
siu_stream->cur_period = 0;
/* during stmread flag set */
siu_stream->rw_flg = RWF_STM_RD;
tasklet_schedule(&siu_stream->tasklet);
return 0;
}
static int siu_pcm_stmread_stop(struct siu_port *port_info)
{
struct siu_info *info = siu_i2s_data;
u32 __iomem *base = info->reg;
struct siu_stream *siu_stream = &port_info->capture;
struct device *dev = siu_stream->substream->pcm->card->dev;
u32 stfifo;
if (!siu_stream->rw_flg)
return -EPERM;
/* input FIFO disable */
stfifo = siu_read32(base + SIU_STFIFO);
siu_write32(base + SIU_STFIFO, stfifo & ~0x13071307);
dev_dbg(dev, "%s: STFIFO %x -> %x\n", __func__,
stfifo, stfifo & ~0x13071307);
/* during stmread flag clear */
siu_stream->rw_flg = 0;
return 0;
}
static int siu_pcm_hw_params(struct snd_pcm_substream *ss,
struct snd_pcm_hw_params *hw_params)
{
struct siu_info *info = siu_i2s_data;
struct device *dev = ss->pcm->card->dev;
int ret;
dev_dbg(dev, "%s: port=%d\n", __func__, info->port_id);
ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
if (ret < 0)
dev_err(dev, "snd_pcm_lib_malloc_pages() failed\n");
return ret;
}
static int siu_pcm_hw_free(struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
struct device *dev = ss->pcm->card->dev;
struct siu_stream *siu_stream;
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
siu_stream = &port_info->playback;
else
siu_stream = &port_info->capture;
dev_dbg(dev, "%s: port=%d\n", __func__, info->port_id);
return snd_pcm_lib_free_pages(ss);
}
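/*
* dma_request_channel() filter: accept only channels belonging to the
* DMA controller named in the slave data and attach the slave config to
* the channel.
*/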
static bool filter(struct dma_chan *chan, void *slave)
{
struct sh_dmae_slave *param = slave;
pr_debug("%s: slave ID %d\n", __func__, param->slave_id);
if (unlikely(param->dma_dev != chan->device->dev))
return false;
chan->private = param;
return true;
}
static int siu_pcm_open(struct snd_pcm_substream *ss)
{
/* Playback / Capture */
struct snd_soc_pcm_runtime *rtd = ss->private_data;
struct siu_platform *pdata = rtd->platform->dev->platform_data;
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
struct siu_stream *siu_stream;
u32 port = info->port_id;
struct device *dev = ss->pcm->card->dev;
dma_cap_mask_t mask;
struct sh_dmae_slave *param;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dev_dbg(dev, "%s, port=%d@%p\n", __func__, port, port_info);
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) {
siu_stream = &port_info->playback;
param = &siu_stream->param;
param->slave_id = port ? pdata->dma_slave_tx_b :
pdata->dma_slave_tx_a;
} else {
siu_stream = &port_info->capture;
param = &siu_stream->param;
param->slave_id = port ? pdata->dma_slave_rx_b :
pdata->dma_slave_rx_a;
}
param->dma_dev = pdata->dma_dev;
/* Get DMA channel */
siu_stream->chan = dma_request_channel(mask, filter, param);
if (!siu_stream->chan) {
dev_err(dev, "DMA channel allocation failed!\n");
return -EBUSY;
}
siu_stream->substream = ss;
return 0;
}
static int siu_pcm_close(struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct device *dev = ss->pcm->card->dev;
struct siu_port *port_info = siu_port_info(ss);
struct siu_stream *siu_stream;
dev_dbg(dev, "%s: port=%d\n", __func__, info->port_id);
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
siu_stream = &port_info->playback;
else
siu_stream = &port_info->capture;
dma_release_channel(siu_stream->chan);
siu_stream->chan = NULL;
siu_stream->substream = NULL;
return 0;
}
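/*
* Cache the stream geometry and sanity-check it: the buffer must be a
* whole number of periods, and the per-period frame count must be
* non-zero and no larger than 0x1000000.
*/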
static int siu_pcm_prepare(struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
struct device *dev = ss->pcm->card->dev;
struct snd_pcm_runtime *rt = ss->runtime;
struct siu_stream *siu_stream;
snd_pcm_sframes_t xfer_cnt;
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
siu_stream = &port_info->playback;
else
siu_stream = &port_info->capture;
rt = siu_stream->substream->runtime;
siu_stream->buf_bytes = snd_pcm_lib_buffer_bytes(ss);
siu_stream->period_bytes = snd_pcm_lib_period_bytes(ss);
dev_dbg(dev, "%s: port=%d, %d channels, period=%u bytes\n", __func__,
info->port_id, rt->channels, siu_stream->period_bytes);
/* We only support buffers that are multiples of the period */
if (siu_stream->buf_bytes % siu_stream->period_bytes) {
dev_err(dev, "%s() - buffer=%d not multiple of period=%d\n",
__func__, siu_stream->buf_bytes,
siu_stream->period_bytes);
return -EINVAL;
}
xfer_cnt = bytes_to_frames(rt, siu_stream->period_bytes);
if (!xfer_cnt || xfer_cnt > 0x1000000)
return -EINVAL;
siu_stream->format = rt->format;
siu_stream->xfer_cnt = xfer_cnt;
dev_dbg(dev, "port=%d buf=%lx buf_bytes=%d period_bytes=%d "
"format=%d channels=%d xfer_cnt=%d\n", info->port_id,
(unsigned long)rt->dma_addr, siu_stream->buf_bytes,
siu_stream->period_bytes,
siu_stream->format, rt->channels, (int)xfer_cnt);
return 0;
}
static int siu_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
{
struct siu_info *info = siu_i2s_data;
struct device *dev = ss->pcm->card->dev;
struct siu_port *port_info = siu_port_info(ss);
int ret;
dev_dbg(dev, "%s: port=%d@%p, cmd=%d\n", __func__,
info->port_id, port_info, cmd);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
ret = siu_pcm_stmwrite_start(port_info);
else
ret = siu_pcm_stmread_start(port_info);
if (ret < 0)
dev_warn(dev, "%s: start failed on port=%d\n",
__func__, info->port_id);
break;
case SNDRV_PCM_TRIGGER_STOP:
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
siu_pcm_stmwrite_stop(port_info);
else
siu_pcm_stmread_stop(port_info);
ret = 0;
break;
default:
dev_err(dev, "%s() unsupported cmd=%d\n", __func__, cmd);
ret = -EINVAL;
}
return ret;
}
/*
* So far only resolution of one period is supported, subject to extending the
* dmangine API
*/
static snd_pcm_uframes_t siu_pcm_pointer_dma(struct snd_pcm_substream *ss)
{
struct device *dev = ss->pcm->card->dev;
struct siu_info *info = siu_i2s_data;
u32 __iomem *base = info->reg;
struct siu_port *port_info = siu_port_info(ss);
struct snd_pcm_runtime *rt = ss->runtime;
size_t ptr;
struct siu_stream *siu_stream;
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK)
siu_stream = &port_info->playback;
else
siu_stream = &port_info->capture;
/*
* ptr is the offset into the buffer where the dma is currently at. We
* check if the dma buffer has just wrapped.
*/
ptr = PERIOD_OFFSET(rt->dma_addr,
siu_stream->cur_period,
siu_stream->period_bytes) - rt->dma_addr;
dev_dbg(dev,
"%s: port=%d, events %x, FSTS %x, xferred %u/%u, cookie %d\n",
__func__, info->port_id, siu_read32(base + SIU_EVNTC),
siu_read32(base + SIU_SBFSTS), ptr, siu_stream->buf_bytes,
siu_stream->cookie);
if (ptr >= siu_stream->buf_bytes)
ptr = 0;
return bytes_to_frames(ss->runtime, ptr);
}
static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
/* card->dev == socdev->dev, see snd_soc_new_pcms() */
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
struct siu_info *info = siu_i2s_data;
struct platform_device *pdev = to_platform_device(card->dev);
int ret;
int i;
/* pdev->id selects between SIUA and SIUB */
if (pdev->id < 0 || pdev->id >= SIU_PORT_NUM)
return -EINVAL;
info->port_id = pdev->id;
/*
* While the siu has 2 ports, only one port can be on at a time (only 1
* SPB). So far all the boards using the siu had only one of the ports
* wired to a codec. To simplify things, we only register one port with
* alsa. In case both ports are needed, it should be changed here
*/
for (i = pdev->id; i < pdev->id + 1; i++) {
struct siu_port **port_info = &siu_ports[i];
ret = siu_init_port(i, port_info, card);
if (ret < 0)
return ret;
ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV, NULL,
SIU_BUFFER_BYTES_MAX, SIU_BUFFER_BYTES_MAX);
if (ret < 0) {
dev_err(card->dev,
"snd_pcm_lib_preallocate_pages_for_all() err=%d",
ret);
goto fail;
}
(*port_info)->pcm = pcm;
/* IO tasklets */
tasklet_init(&(*port_info)->playback.tasklet, siu_io_tasklet,
(unsigned long)&(*port_info)->playback);
tasklet_init(&(*port_info)->capture.tasklet, siu_io_tasklet,
(unsigned long)&(*port_info)->capture);
}
dev_info(card->dev, "SuperH SIU driver initialized.\n");
return 0;
fail:
siu_free_port(siu_ports[pdev->id]);
dev_err(card->dev, "SIU: failed to initialize.\n");
return ret;
}
static void siu_pcm_free(struct snd_pcm *pcm)
{
struct platform_device *pdev = to_platform_device(pcm->card->dev);
struct siu_port *port_info = siu_ports[pdev->id];
tasklet_kill(&port_info->capture.tasklet);
tasklet_kill(&port_info->playback.tasklet);
siu_free_port(port_info);
snd_pcm_lib_preallocate_free_for_all(pcm);
dev_dbg(pcm->card->dev, "%s\n", __func__);
}
static struct snd_pcm_ops siu_pcm_ops = {
.open = siu_pcm_open,
.close = siu_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = siu_pcm_hw_params,
.hw_free = siu_pcm_hw_free,
.prepare = siu_pcm_prepare,
.trigger = siu_pcm_trigger,
.pointer = siu_pcm_pointer_dma,
};
struct snd_soc_platform_driver siu_platform = {
.ops = &siu_pcm_ops,
.pcm_new = siu_pcm_new,
.pcm_free = siu_pcm_free,
};
EXPORT_SYMBOL_GPL(siu_platform);
| gpl-2.0 |
MassStash/m8whl_sense | drivers/sh/intc/chip.c | 4940 | 5479 | /*
* IRQ chip definitions for INTC IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
* Copyright (C) 2009 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpumask.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include "internals.h"
void _intc_enable(struct irq_data *data, unsigned long handle)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long addr;
unsigned int cpu;
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, data->affinity))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns[_INTC_FN(handle)], irq);
}
intc_balancing_enable(irq);
}
static void intc_enable(struct irq_data *data)
{
_intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data));
}
static void intc_disable(struct irq_data *data)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);
unsigned long addr;
unsigned int cpu;
intc_balancing_disable(irq);
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, data->affinity))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
intc_disable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns[_INTC_FN(handle)], irq);
}
}
#ifdef CONFIG_SMP
/*
* This is held with the irq desc lock held, so we don't require any
* additional locking here at the intc desc level. The affinity mask is
* later tested in the enable/disable paths.
*/
static int intc_set_affinity(struct irq_data *data,
const struct cpumask *cpumask,
bool force)
{
if (!cpumask_intersects(cpumask, cpu_online_mask))
return -1;
cpumask_copy(data->affinity, cpumask);
return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
static void intc_mask_ack(struct irq_data *data)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = intc_get_ack_handle(irq);
unsigned long addr;
intc_disable(data);
/* read register and write zero only to the associated bit */
if (handle) {
unsigned int value;
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
value = intc_set_field_from_handle(0, 1, handle);
switch (_INTC_FN(handle)) {
case REG_FN_MODIFY_BASE + 0: /* 8bit */
__raw_readb(addr);
__raw_writeb(0xff ^ value, addr);
break;
case REG_FN_MODIFY_BASE + 1: /* 16bit */
__raw_readw(addr);
__raw_writew(0xffff ^ value, addr);
break;
case REG_FN_MODIFY_BASE + 3: /* 32bit */
__raw_readl(addr);
__raw_writel(0xffffffff ^ value, addr);
break;
default:
BUG();
break;
}
}
}
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
unsigned int nr_hp,
unsigned int irq)
{
struct intc_handle_int key;
key.irq = irq;
key.handle = 0;
return bsearch(&key, hp, nr_hp, sizeof(*hp), intc_handle_int_cmp);
}
int intc_set_priority(unsigned int irq, unsigned int prio)
{
struct intc_desc_int *d = get_intc_desc(irq);
struct irq_data *data = irq_get_irq_data(irq);
struct intc_handle_int *ihp;
if (!intc_get_prio_level(irq) || prio <= 1)
return -EINVAL;
ihp = intc_find_irq(d->prio, d->nr_prio, irq);
if (ihp) {
if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
return -EINVAL;
intc_set_prio_level(irq, prio);
/*
* only set secondary masking method directly
* primary masking method is using intc_prio_level[irq]
* priority level will be set during next enable()
*/
if (_INTC_FN(ihp->handle) != REG_FN_ERR)
_intc_enable(data, ihp->handle);
}
return 0;
}
#define SENSE_VALID_FLAG 0x80
#define VALID(x) ((x) | SENSE_VALID_FLAG)
static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
[IRQ_TYPE_EDGE_FALLING] = VALID(0),
[IRQ_TYPE_EDGE_RISING] = VALID(1),
[IRQ_TYPE_LEVEL_LOW] = VALID(2),
/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
!defined(CONFIG_CPU_SUBTYPE_SH7707) && \
!defined(CONFIG_CPU_SUBTYPE_SH7709)
[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
#if defined(CONFIG_ARM) /* all recent SH-Mobile / R-Mobile ARM support this */
[IRQ_TYPE_EDGE_BOTH] = VALID(4),
#endif
};
static int intc_set_type(struct irq_data *data, unsigned int type)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
struct intc_handle_int *ihp;
unsigned long addr;
if (!value)
return -EINVAL;
value &= ~SENSE_VALID_FLAG;
ihp = intc_find_irq(d->sense, d->nr_sense, irq);
if (ihp) {
/* PINT has 2-bit sense registers, should fail on EDGE_BOTH */
if (value >= (1 << _INTC_WIDTH(ihp->handle)))
return -EINVAL;
addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
}
return 0;
}
struct irq_chip intc_irq_chip = {
.irq_mask = intc_disable,
.irq_unmask = intc_enable,
.irq_mask_ack = intc_mask_ack,
.irq_enable = intc_enable,
.irq_disable = intc_disable,
.irq_set_type = intc_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = intc_set_affinity,
#endif
.flags = IRQCHIP_SKIP_SET_WAKE,
};
| gpl-2.0 |
alecuba16/android_kernel_iuni_msm8974 | drivers/net/irda/au1k_ir.c | 4940 | 23820 | /*
* Alchemy Semi Au1000 IrDA driver
*
* Copyright 2001 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/types.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/mach-au1x00/au1000.h>
/* registers */
#define IR_RING_PTR_STATUS 0x00
#define IR_RING_BASE_ADDR_H 0x04
#define IR_RING_BASE_ADDR_L 0x08
#define IR_RING_SIZE 0x0C
#define IR_RING_PROMPT 0x10
#define IR_RING_ADDR_CMPR 0x14
#define IR_INT_CLEAR 0x18
#define IR_CONFIG_1 0x20
#define IR_SIR_FLAGS 0x24
#define IR_STATUS 0x28
#define IR_READ_PHY_CONFIG 0x2C
#define IR_WRITE_PHY_CONFIG 0x30
#define IR_MAX_PKT_LEN 0x34
#define IR_RX_BYTE_CNT 0x38
#define IR_CONFIG_2 0x3C
#define IR_ENABLE 0x40
/* Config1 */
#define IR_RX_INVERT_LED (1 << 0)
#define IR_TX_INVERT_LED (1 << 1)
#define IR_ST (1 << 2)
#define IR_SF (1 << 3)
#define IR_SIR (1 << 4)
#define IR_MIR (1 << 5)
#define IR_FIR (1 << 6)
#define IR_16CRC (1 << 7)
#define IR_TD (1 << 8)
#define IR_RX_ALL (1 << 9)
#define IR_DMA_ENABLE (1 << 10)
#define IR_RX_ENABLE (1 << 11)
#define IR_TX_ENABLE (1 << 12)
#define IR_LOOPBACK (1 << 14)
#define IR_SIR_MODE (IR_SIR | IR_DMA_ENABLE | \
IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
IR_16CRC)
/* ir_status */
#define IR_RX_STATUS (1 << 9)
#define IR_TX_STATUS (1 << 10)
#define IR_PHYEN (1 << 15)
/* ir_write_phy_config */
#define IR_BR(x) (((x) & 0x3f) << 10) /* baud rate */
#define IR_PW(x) (((x) & 0x1f) << 5) /* pulse width */
#define IR_P(x) ((x) & 0x1f) /* preamble bits */
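/*
 * Example encodings (see au1k_irda_set_speed() below): 9600 baud SIR is
 * programmed as IR_BR(11) | IR_PW(12), 115200 baud uses IR_PW(12) alone,
 * and 4 Mbps FIR only sets the preamble length via IR_P(15).
 */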
/* Config2 */
#define IR_MODE_INV (1 << 0)
#define IR_ONE_PIN (1 << 1)
#define IR_PHYCLK_40MHZ (0 << 2)
#define IR_PHYCLK_48MHZ (1 << 2)
#define IR_PHYCLK_56MHZ (2 << 2)
#define IR_PHYCLK_64MHZ (3 << 2)
#define IR_DP (1 << 4)
#define IR_DA (1 << 5)
#define IR_FLT_HIGH (0 << 6)
#define IR_FLT_MEDHI (1 << 6)
#define IR_FLT_MEDLO (2 << 6)
#define IR_FLT_LO (3 << 6)
#define IR_IEN (1 << 8)
/* ir_enable */
#define IR_HC (1 << 3) /* divide SBUS clock by 2 */
#define IR_CE (1 << 2) /* clock enable */
#define IR_C (1 << 1) /* coherency bit */
#define IR_BE (1 << 0) /* set in big endian mode */
#define NUM_IR_DESC 64
#define RING_SIZE_4 0x0
#define RING_SIZE_16 0x3
#define RING_SIZE_64 0xF
#define MAX_NUM_IR_DESC 64
#define MAX_BUF_SIZE 2048
/* Ring descriptor flags */
#define AU_OWN (1 << 7) /* tx,rx */
#define IR_DIS_CRC (1 << 6) /* tx */
#define IR_BAD_CRC (1 << 5) /* tx */
#define IR_NEED_PULSE (1 << 4) /* tx */
#define IR_FORCE_UNDER (1 << 3) /* tx */
#define IR_DISABLE_TX (1 << 2) /* tx */
#define IR_HW_UNDER (1 << 0) /* tx */
#define IR_TX_ERROR (IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)
#define IR_PHY_ERROR (1 << 6) /* rx */
#define IR_CRC_ERROR (1 << 5) /* rx */
#define IR_MAX_LEN (1 << 4) /* rx */
#define IR_FIFO_OVER (1 << 3) /* rx */
#define IR_SIR_ERROR (1 << 2) /* rx */
#define IR_RX_ERROR (IR_PHY_ERROR | IR_CRC_ERROR | \
IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)
struct db_dest {
struct db_dest *pnext;
volatile u32 *vaddr;
dma_addr_t dma_addr;
};
struct ring_dest {
u8 count_0; /* 7:0 */
u8 count_1; /* 12:8 */
u8 reserved;
u8 flags;
u8 addr_0; /* 7:0 */
u8 addr_1; /* 15:8 */
u8 addr_2; /* 23:16 */
u8 addr_3; /* 31:24 */
};
/* Private data for each instance */
struct au1k_private {
void __iomem *iobase;
int irq_rx, irq_tx;
struct db_dest *pDBfree;
struct db_dest db[2 * NUM_IR_DESC];
volatile struct ring_dest *rx_ring[NUM_IR_DESC];
volatile struct ring_dest *tx_ring[NUM_IR_DESC];
struct db_dest *rx_db_inuse[NUM_IR_DESC];
struct db_dest *tx_db_inuse[NUM_IR_DESC];
u32 rx_head;
u32 tx_head;
u32 tx_tail;
u32 tx_full;
iobuff_t rx_buff;
struct net_device *netdev;
struct timeval stamp;
struct timeval now;
struct qos_info qos;
struct irlap_cb *irlap;
u8 open;
u32 speed;
u32 newspeed;
struct timer_list timer;
struct resource *ioarea;
struct au1k_irda_platform_data *platdata;
};
static int qos_mtt_bits = 0x07; /* 1 ms or more */
#define RUN_AT(x) (jiffies + (x))
static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
{
if (p->platdata && p->platdata->set_phy_mode)
p->platdata->set_phy_mode(mode);
}
static inline unsigned long irda_read(struct au1k_private *p,
unsigned long ofs)
{
/*
* IrDA peripheral bug. You have to read the register
* twice to get the right value.
*/
(void)__raw_readl(p->iobase + ofs);
return __raw_readl(p->iobase + ofs);
}
static inline void irda_write(struct au1k_private *p, unsigned long ofs,
unsigned long val)
{
__raw_writel(val, p->iobase + ofs);
wmb();
}
/*
* Buffer allocation/deallocation routines. The buffer descriptor returned
* has the virtual and dma address of a buffer suitable for
* both receive and transmit operations.
*/
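/*
 * GetFreeDB() simply pops the head of the singly linked free list that
 * au1k_irda_net_init() builds out of aup->db[].
 */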
static struct db_dest *GetFreeDB(struct au1k_private *aup)
{
struct db_dest *db;
db = aup->pDBfree;
if (db)
aup->pDBfree = db->pnext;
return db;
}
/*
 * DMA memory allocation, derived from pci_alloc_consistent.
 * However, the Au1000 data cache is coherent (when programmed
 * so), therefore we return KSEG0 address, not KSEG1.
 */
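/*
 * (On MIPS, KSEG0 is the cached unmapped segment and KSEG1 the uncached
 * one; with the data cache coherent for DMA, handing back the cached
 * KSEG0 alias is safe.)
 */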
static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
void *ret;
int gfp = GFP_ATOMIC | GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = virt_to_bus(ret);
ret = (void *)KSEG0ADDR(ret);
}
return ret;
}
static void dma_free(void *vaddr, size_t size)
{
vaddr = (void *)KSEG0ADDR(vaddr);
free_pages((unsigned long) vaddr, get_order(size));
}
static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
int i;
for (i = 0; i < NUM_IR_DESC; i++) {
aup->rx_ring[i] = (volatile struct ring_dest *)
(rx_base + sizeof(struct ring_dest) * i);
}
for (i = 0; i < NUM_IR_DESC; i++) {
aup->tx_ring[i] = (volatile struct ring_dest *)
(tx_base + sizeof(struct ring_dest) * i);
}
}
static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
io->head = kmalloc(size, GFP_KERNEL);
if (io->head != NULL) {
io->truesize = size;
io->in_frame = FALSE;
io->state = OUTSIDE_FRAME;
io->data = io->head;
}
return io->head ? 0 : -ENOMEM;
}
/*
* Set the IrDA communications speed.
*/
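/*
 * The sequence below is: switch the PHY off, drain and disable RX/TX and
 * DMA, reset both descriptor rings, then program the PHY configuration
 * for the new rate and switch the PHY back on.
 */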
static int au1k_irda_set_speed(struct net_device *dev, int speed)
{
struct au1k_private *aup = netdev_priv(dev);
volatile struct ring_dest *ptxd;
unsigned long control;
int ret = 0, timeout = 10, i;
if (speed == aup->speed)
return ret;
/* disable PHY first */
au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
/* disable RX/TX */
irda_write(aup, IR_CONFIG_1,
irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
msleep(20);
while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
msleep(20);
if (!timeout--) {
printk(KERN_ERR "%s: rx/tx disable timeout\n",
dev->name);
break;
}
}
/* disable DMA */
irda_write(aup, IR_CONFIG_1,
irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
msleep(20);
/* After we disable tx/rx. the index pointers go back to zero. */
aup->tx_head = aup->tx_tail = aup->rx_head = 0;
for (i = 0; i < NUM_IR_DESC; i++) {
ptxd = aup->tx_ring[i];
ptxd->flags = 0;
ptxd->count_0 = 0;
ptxd->count_1 = 0;
}
for (i = 0; i < NUM_IR_DESC; i++) {
ptxd = aup->rx_ring[i];
ptxd->count_0 = 0;
ptxd->count_1 = 0;
ptxd->flags = AU_OWN;
}
if (speed == 4000000)
au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
else
au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
switch (speed) {
case 9600:
irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
break;
case 19200:
irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
break;
case 38400:
irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
break;
case 57600:
irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
break;
case 115200:
irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
break;
case 4000000:
irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
IR_RX_ENABLE);
break;
default:
printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
ret = -EINVAL;
break;
}
aup->speed = speed;
irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);
control = irda_read(aup, IR_STATUS);
irda_write(aup, IR_RING_PROMPT, 0);
if (control & (1 << 14)) {
printk(KERN_ERR "%s: configuration error\n", dev->name);
} else {
if (control & (1 << 11))
printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
if (control & (1 << 12))
printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
if (control & (1 << 13))
printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
if (control & (1 << 10))
printk(KERN_DEBUG "%s TX enabled\n", dev->name);
if (control & (1 << 9))
printk(KERN_DEBUG "%s RX enabled\n", dev->name);
}
return ret;
}
static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
{
struct net_device_stats *ps = &dev->stats;
ps->rx_packets++;
if (status & IR_RX_ERROR) {
ps->rx_errors++;
if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
ps->rx_missed_errors++;
if (status & IR_MAX_LEN)
ps->rx_length_errors++;
if (status & IR_CRC_ERROR)
ps->rx_crc_errors++;
} else
ps->rx_bytes += count;
}
static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
{
struct net_device_stats *ps = &dev->stats;
ps->tx_packets++;
ps->tx_bytes += pkt_len;
if (status & IR_TX_ERROR) {
ps->tx_errors++;
ps->tx_aborted_errors++;
}
}
static void au1k_tx_ack(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
volatile struct ring_dest *ptxd;
ptxd = aup->tx_ring[aup->tx_tail];
while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
update_tx_stats(dev, ptxd->flags,
(ptxd->count_1 << 8) | ptxd->count_0);
ptxd->count_0 = 0;
ptxd->count_1 = 0;
wmb();
aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
ptxd = aup->tx_ring[aup->tx_tail];
if (aup->tx_full) {
aup->tx_full = 0;
netif_wake_queue(dev);
}
}
if (aup->tx_tail == aup->tx_head) {
if (aup->newspeed) {
au1k_irda_set_speed(dev, aup->newspeed);
aup->newspeed = 0;
} else {
irda_write(aup, IR_CONFIG_1,
irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
irda_write(aup, IR_CONFIG_1,
irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
irda_write(aup, IR_RING_PROMPT, 0);
}
}
}
static int au1k_irda_rx(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
volatile struct ring_dest *prxd;
struct sk_buff *skb;
struct db_dest *pDB;
u32 flags, count;
prxd = aup->rx_ring[aup->rx_head];
flags = prxd->flags;
while (!(flags & AU_OWN)) {
pDB = aup->rx_db_inuse[aup->rx_head];
count = (prxd->count_1 << 8) | prxd->count_0;
if (!(flags & IR_RX_ERROR)) {
/* good frame */
update_rx_stats(dev, flags, count);
skb = alloc_skb(count + 1, GFP_ATOMIC);
if (skb == NULL) {
/*
 * Don't retry the same descriptor forever on an atomic
 * allocation failure: count the drop and let the common
 * code below hand the descriptor back to the hardware.
 */
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 1);
if (aup->speed == 4000000)
skb_put(skb, count);
else
skb_put(skb, count - 2);
skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
count - 2);
skb->dev = dev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
netif_rx(skb);
prxd->count_0 = 0;
prxd->count_1 = 0;
}
}
prxd->flags |= AU_OWN;
aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
irda_write(aup, IR_RING_PROMPT, 0);
/* next descriptor */
prxd = aup->rx_ring[aup->rx_head];
flags = prxd->flags;
}
return 0;
}
static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
{
struct net_device *dev = dev_id;
struct au1k_private *aup = netdev_priv(dev);
irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */
au1k_irda_rx(dev);
au1k_tx_ack(dev);
return IRQ_HANDLED;
}
static int au1k_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
u32 enable, ring_address;
int i;
enable = IR_HC | IR_CE | IR_C;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
enable |= IR_BE;
#endif
aup->tx_head = 0;
aup->tx_tail = 0;
aup->rx_head = 0;
for (i = 0; i < NUM_IR_DESC; i++)
aup->rx_ring[i]->flags = AU_OWN;
irda_write(aup, IR_ENABLE, enable);
msleep(20);
/* disable PHY */
au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
msleep(20);
irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);
ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);
irda_write(aup, IR_RING_SIZE,
(RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN);
irda_write(aup, IR_RING_ADDR_CMPR, 0);
au1k_irda_set_speed(dev, 9600);
return 0;
}
static int au1k_irda_start(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
char hwname[32];
int retval;
retval = au1k_init(dev);
if (retval) {
printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
return retval;
}
retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
dev->name, dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, aup->irq_tx);
return retval;
}
retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
dev->name, dev);
if (retval) {
free_irq(aup->irq_tx, dev);
printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, aup->irq_rx);
return retval;
}
/* Give self a hardware name */
sprintf(hwname, "Au1000 SIR/FIR");
aup->irlap = irlap_open(dev, &aup->qos, hwname);
netif_start_queue(dev);
/* int enable */
irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);
/* power up */
au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
aup->timer.expires = RUN_AT((3 * HZ));
aup->timer.data = (unsigned long)dev;
return 0;
}
static int au1k_irda_stop(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
/* disable interrupts */
irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
irda_write(aup, IR_CONFIG_1, 0);
irda_write(aup, IR_ENABLE, 0); /* disable clock */
if (aup->irlap) {
irlap_close(aup->irlap);
aup->irlap = NULL;
}
netif_stop_queue(dev);
del_timer(&aup->timer);
/* disable the interrupt */
free_irq(aup->irq_tx, dev);
free_irq(aup->irq_rx, dev);
return 0;
}
/*
* Au1000 transmit routine.
*/
static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
int speed = irda_get_next_speed(skb);
volatile struct ring_dest *ptxd;
struct db_dest *pDB;
u32 len, flags;
if (speed != aup->speed && speed != -1)
aup->newspeed = speed;
if ((skb->len == 0) && (aup->newspeed)) {
if (aup->tx_tail == aup->tx_head) {
au1k_irda_set_speed(dev, speed);
aup->newspeed = 0;
}
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
ptxd = aup->tx_ring[aup->tx_head];
flags = ptxd->flags;
if (flags & AU_OWN) {
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
return NETDEV_TX_BUSY;
} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
return NETDEV_TX_BUSY;
}
pDB = aup->tx_db_inuse[aup->tx_head];
#if 0
if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
irda_read(aup, IR_RX_BYTE_CNT));
}
#endif
if (aup->speed == 4000000) {
/* FIR */
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
ptxd->count_0 = skb->len & 0xff;
ptxd->count_1 = (skb->len >> 8) & 0xff;
} else {
/* SIR */
len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
ptxd->count_0 = len & 0xff;
ptxd->count_1 = (len >> 8) & 0xff;
ptxd->flags |= IR_DIS_CRC;
}
ptxd->flags |= AU_OWN;
wmb();
irda_write(aup, IR_CONFIG_1,
irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
irda_write(aup, IR_RING_PROMPT, 0);
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
return NETDEV_TX_OK;
}
/*
* The Tx ring has been full for longer than the watchdog timeout,
* so the transmitter is presumably hung; reset it.
*/
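/*
 * Recovery here simply forces au1k_irda_set_speed() to re-run at the
 * current speed, which resets the descriptor rings and reprograms the
 * PHY before the queue is woken again.
 */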
static void au1k_tx_timeout(struct net_device *dev)
{
u32 speed;
struct au1k_private *aup = netdev_priv(dev);
printk(KERN_ERR "%s: tx timeout\n", dev->name);
speed = aup->speed;
aup->speed = 0;
au1k_irda_set_speed(dev, speed);
aup->tx_full = 0;
netif_wake_queue(dev);
}
static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
struct if_irda_req *rq = (struct if_irda_req *)ifreq;
struct au1k_private *aup = netdev_priv(dev);
int ret = -EOPNOTSUPP;
switch (cmd) {
case SIOCSBANDWIDTH:
if (capable(CAP_NET_ADMIN)) {
/*
* We are unable to set the speed if the
* device is not running.
*/
if (aup->open)
ret = au1k_irda_set_speed(dev,
rq->ifr_baudrate);
else {
printk(KERN_ERR "%s ioctl: !netif_running\n",
dev->name);
ret = 0;
}
}
break;
case SIOCSMEDIABUSY:
ret = -EPERM;
if (capable(CAP_NET_ADMIN)) {
irda_device_set_media_busy(dev, TRUE);
ret = 0;
}
break;
case SIOCGRECEIVING:
rq->ifr_receiving = 0;
break;
default:
break;
}
return ret;
}
static const struct net_device_ops au1k_irda_netdev_ops = {
.ndo_open = au1k_irda_start,
.ndo_stop = au1k_irda_stop,
.ndo_start_xmit = au1k_irda_hard_xmit,
.ndo_tx_timeout = au1k_tx_timeout,
.ndo_do_ioctl = au1k_irda_ioctl,
};
static int __devinit au1k_irda_net_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
struct db_dest *pDB, *pDBfree;
int i, err, retval = 0;
dma_addr_t temp;
err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
if (err) {
retval = err;
goto out1;
}
dev->netdev_ops = &au1k_irda_netdev_ops;
irda_init_max_qos_capabilies(&aup->qos);
/* The only value we must override is the baudrate */
aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);
aup->qos.min_turn_time.bits = qos_mtt_bits;
irda_qos_bits_to_value(&aup->qos);
retval = -ENOMEM;
/* Tx ring follows rx ring + 512 bytes */
/* we need a 1k aligned buffer */
aup->rx_ring[0] = (struct ring_dest *)
dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
&temp);
if (!aup->rx_ring[0])
goto out2;
/* allocate the data buffers */
aup->db[0].vaddr =
(void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
if (!aup->db[0].vaddr)
goto out3;
setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
pDBfree = NULL;
pDB = aup->db;
for (i = 0; i < (2 * NUM_IR_DESC); i++) {
pDB->pnext = pDBfree;
pDBfree = pDB;
pDB->vaddr =
(u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
pDB++;
}
aup->pDBfree = pDBfree;
/* attach a data buffer to each descriptor */
for (i = 0; i < NUM_IR_DESC; i++) {
pDB = GetFreeDB(aup);
if (!pDB)
goto out3;
aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
aup->rx_db_inuse[i] = pDB;
}
for (i = 0; i < NUM_IR_DESC; i++) {
pDB = GetFreeDB(aup);
if (!pDB)
goto out3;
aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
aup->tx_ring[i]->count_0 = 0;
aup->tx_ring[i]->count_1 = 0;
aup->tx_ring[i]->flags = 0;
aup->tx_db_inuse[i] = pDB;
}
return 0;
out3:
dma_free((void *)aup->rx_ring[0],
2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
out2:
kfree(aup->rx_buff.head);
out1:
printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
return retval;
}
static int __devinit au1k_irda_probe(struct platform_device *pdev)
{
struct au1k_private *aup;
struct net_device *dev;
struct resource *r;
int err;
dev = alloc_irdadev(sizeof(struct au1k_private));
if (!dev)
return -ENOMEM;
aup = netdev_priv(dev);
aup->platdata = pdev->dev.platform_data;
err = -EINVAL;
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!r)
goto out;
aup->irq_tx = r->start;
r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!r)
goto out;
aup->irq_rx = r->start;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
goto out;
err = -EBUSY;
aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
pdev->name);
if (!aup->ioarea)
goto out;
aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
if (!aup->iobase)
goto out2;
dev->irq = aup->irq_rx;
err = au1k_irda_net_init(dev);
if (err)
goto out3;
err = register_netdev(dev);
if (err)
goto out4;
platform_set_drvdata(pdev, dev);
printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
return 0;
out4:
dma_free((void *)aup->db[0].vaddr,
MAX_BUF_SIZE * 2 * NUM_IR_DESC);
dma_free((void *)aup->rx_ring[0],
2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
kfree(aup->rx_buff.head);
out3:
iounmap(aup->iobase);
out2:
release_resource(aup->ioarea);
kfree(aup->ioarea);
out:
free_netdev(dev);
return err;
}
static int __devexit au1k_irda_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct au1k_private *aup = netdev_priv(dev);
unregister_netdev(dev);
dma_free((void *)aup->db[0].vaddr,
MAX_BUF_SIZE * 2 * NUM_IR_DESC);
dma_free((void *)aup->rx_ring[0],
2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
kfree(aup->rx_buff.head);
iounmap(aup->iobase);
release_resource(aup->ioarea);
kfree(aup->ioarea);
free_netdev(dev);
return 0;
}
static struct platform_driver au1k_irda_driver = {
.driver = {
.name = "au1000-irda",
.owner = THIS_MODULE,
},
.probe = au1k_irda_probe,
.remove = __devexit_p(au1k_irda_remove),
};
static int __init au1k_irda_load(void)
{
return platform_driver_register(&au1k_irda_driver);
}
static void __exit au1k_irda_unload(void)
{
return platform_driver_unregister(&au1k_irda_driver);
}
MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
module_init(au1k_irda_load);
module_exit(au1k_irda_unload);
| gpl-2.0 |
moddingg33k/android_kernel_huawai_Y300-J1 | drivers/hwmon/adt7411.c | 4940 | 9947 | /*
* Driver for the ADT7411 (I2C/SPI 8 channel 10 bit ADC & temperature-sensor)
*
* Copyright (C) 2008, 2010 Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* TODO: SPI, support for external temperature sensor
* use power-down mode for suspend?, interrupt handling?
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>
#define ADT7411_REG_INT_TEMP_VDD_LSB 0x03
#define ADT7411_REG_EXT_TEMP_AIN14_LSB 0x04
#define ADT7411_REG_VDD_MSB 0x06
#define ADT7411_REG_INT_TEMP_MSB 0x07
#define ADT7411_REG_EXT_TEMP_AIN1_MSB 0x08
#define ADT7411_REG_CFG1 0x18
#define ADT7411_CFG1_START_MONITOR (1 << 0)
#define ADT7411_REG_CFG2 0x19
#define ADT7411_CFG2_DISABLE_AVG (1 << 5)
#define ADT7411_REG_CFG3 0x1a
#define ADT7411_CFG3_ADC_CLK_225 (1 << 0)
#define ADT7411_CFG3_REF_VDD (1 << 4)
#define ADT7411_REG_DEVICE_ID 0x4d
#define ADT7411_REG_MANUFACTURER_ID 0x4e
#define ADT7411_DEVICE_ID 0x2
#define ADT7411_MANUFACTURER_ID 0x41
static const unsigned short normal_i2c[] = { 0x48, 0x4a, 0x4b, I2C_CLIENT_END };
struct adt7411_data {
struct mutex device_lock; /* for "atomic" device accesses */
struct mutex update_lock;
unsigned long next_update;
int vref_cached;
struct device *hwmon_dev;
};
/*
* When reading a register containing (up to 4) lsb, all associated
* msb-registers get locked by the hardware. After _one_ of those msb is read,
* _all_ are unlocked. In order to use this locking correctly, reading lsb/msb
* is protected here with a mutex, too.
*/
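/*
 * Example: for external input AIN1 this reads the shared lsb register at
 * 0x04 (two bits per channel) and the msb register at 0x08, returning
 * (msb << 2) | ((lsb >> shift) & 3), i.e. a 10 bit value.
 */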
static int adt7411_read_10_bit(struct i2c_client *client, u8 lsb_reg,
u8 msb_reg, u8 lsb_shift)
{
struct adt7411_data *data = i2c_get_clientdata(client);
int val, tmp;
mutex_lock(&data->device_lock);
val = i2c_smbus_read_byte_data(client, lsb_reg);
if (val < 0)
goto exit_unlock;
tmp = (val >> lsb_shift) & 3;
val = i2c_smbus_read_byte_data(client, msb_reg);
if (val >= 0)
val = (val << 2) | tmp;
exit_unlock:
mutex_unlock(&data->device_lock);
return val;
}
static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit,
bool flag)
{
struct adt7411_data *data = i2c_get_clientdata(client);
int ret, val;
mutex_lock(&data->device_lock);
ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
goto exit_unlock;
if (flag)
val = ret | bit;
else
val = ret & ~bit;
ret = i2c_smbus_write_byte_data(client, reg, val);
exit_unlock:
mutex_unlock(&data->device_lock);
return ret;
}
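/*
 * VDD is reported by the 10 bit ADC with (judging by the scaling used
 * below) a 7.0 V full scale, hence raw * 7000 / 1024 gives millivolts.
 */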
static ssize_t adt7411_show_vdd(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
int ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
ADT7411_REG_VDD_MSB, 2);
return ret < 0 ? ret : sprintf(buf, "%u\n", ret * 7000 / 1024);
}
static ssize_t adt7411_show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
int val = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
ADT7411_REG_INT_TEMP_MSB, 0);
if (val < 0)
return val;
val = val & 0x200 ? val - 0x400 : val; /* 10 bit signed */
return sprintf(buf, "%d\n", val * 250);
}
static ssize_t adt7411_show_input(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct adt7411_data *data = i2c_get_clientdata(client);
int val;
u8 lsb_reg, lsb_shift;
mutex_lock(&data->update_lock);
if (time_after_eq(jiffies, data->next_update)) {
val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
if (val < 0)
goto exit_unlock;
if (val & ADT7411_CFG3_REF_VDD) {
val = adt7411_read_10_bit(client,
ADT7411_REG_INT_TEMP_VDD_LSB,
ADT7411_REG_VDD_MSB, 2);
if (val < 0)
goto exit_unlock;
data->vref_cached = val * 7000 / 1024;
} else {
data->vref_cached = 2250;
}
data->next_update = jiffies + HZ;
}
lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
lsb_shift = 2 * (nr & 0x03);
val = adt7411_read_10_bit(client, lsb_reg,
ADT7411_REG_EXT_TEMP_AIN1_MSB + nr, lsb_shift);
if (val < 0)
goto exit_unlock;
val = sprintf(buf, "%u\n", val * data->vref_cached / 1024);
exit_unlock:
mutex_unlock(&data->update_lock);
return val;
}
static ssize_t adt7411_show_bit(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
struct i2c_client *client = to_i2c_client(dev);
int ret = i2c_smbus_read_byte_data(client, attr2->index);
return ret < 0 ? ret : sprintf(buf, "%u\n", !!(ret & attr2->nr));
}
static ssize_t adt7411_set_bit(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct sensor_device_attribute_2 *s_attr2 = to_sensor_dev_attr_2(attr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7411_data *data = i2c_get_clientdata(client);
int ret;
unsigned long flag;
ret = kstrtoul(buf, 0, &flag);
if (ret || flag > 1)
return -EINVAL;
ret = adt7411_modify_bit(client, s_attr2->index, s_attr2->nr, flag);
/* force update */
mutex_lock(&data->update_lock);
data->next_update = jiffies;
mutex_unlock(&data->update_lock);
return ret < 0 ? ret : count;
}
#define ADT7411_BIT_ATTR(__name, __reg, __bit) \
SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \
adt7411_set_bit, __bit, __reg)
static DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL);
static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, adt7411_show_input, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, adt7411_show_input, NULL, 3);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, adt7411_show_input, NULL, 4);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, adt7411_show_input, NULL, 5);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, adt7411_show_input, NULL, 6);
static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, adt7411_show_input, NULL, 7);
static ADT7411_BIT_ATTR(no_average, ADT7411_REG_CFG2, ADT7411_CFG2_DISABLE_AVG);
static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_225);
static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD);
static struct attribute *adt7411_attrs[] = {
&dev_attr_temp1_input.attr,
&dev_attr_in0_input.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
&sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_no_average.dev_attr.attr,
&sensor_dev_attr_fast_sampling.dev_attr.attr,
&sensor_dev_attr_adc_ref_vdd.dev_attr.attr,
NULL
};
static const struct attribute_group adt7411_attr_grp = {
.attrs = adt7411_attrs,
};
static int adt7411_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
int val;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
val = i2c_smbus_read_byte_data(client, ADT7411_REG_MANUFACTURER_ID);
if (val < 0 || val != ADT7411_MANUFACTURER_ID) {
dev_dbg(&client->dev, "Wrong manufacturer ID. Got %d, "
"expected %d\n", val, ADT7411_MANUFACTURER_ID);
return -ENODEV;
}
val = i2c_smbus_read_byte_data(client, ADT7411_REG_DEVICE_ID);
if (val < 0 || val != ADT7411_DEVICE_ID) {
dev_dbg(&client->dev, "Wrong device ID. Got %d, "
"expected %d\n", val, ADT7411_DEVICE_ID);
return -ENODEV;
}
strlcpy(info->type, "adt7411", I2C_NAME_SIZE);
return 0;
}
static int __devinit adt7411_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adt7411_data *data;
int ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->device_lock);
mutex_init(&data->update_lock);
ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
ADT7411_CFG1_START_MONITOR, 1);
if (ret < 0)
goto exit_free;
/* force update on first occasion */
data->next_update = jiffies;
ret = sysfs_create_group(&client->dev.kobj, &adt7411_attr_grp);
if (ret)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
dev_info(&client->dev, "successfully registered\n");
return 0;
exit_remove:
sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
exit_free:
kfree(data);
return ret;
}
static int __devexit adt7411_remove(struct i2c_client *client)
{
struct adt7411_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
kfree(data);
return 0;
}
static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {
.name = "adt7411",
},
.probe = adt7411_probe,
.remove = __devexit_p(adt7411_remove),
.id_table = adt7411_id,
.detect = adt7411_detect,
.address_list = normal_i2c,
.class = I2C_CLASS_HWMON,
};
module_i2c_driver(adt7411_driver);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de> and "
"Wolfram Sang <w.sang@pengutronix.de>");
MODULE_DESCRIPTION("ADT7411 driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
cristianhristea/linux_kernel | arch/powerpc/sysdev/bestcomm/sram.c | 4940 | 4393 | /*
* Simple memory allocator for on-board SRAM
*
*
* Maintainer : Sylvain Munaut <tnt@246tNt.com>
*
* Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include "sram.h"
/* Struct keeping our 'state' */
struct bcom_sram *bcom_sram = NULL;
EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */
/* ======================================================================== */
/* Public API */
/* ======================================================================== */
/* DO NOT USE from interrupt context; if this is ever needed in an irq
handler, the spin_locks must be switched to their _irqsave variants */
int bcom_sram_init(struct device_node *sram_node, char *owner)
{
int rv;
const u32 *regaddr_p;
u64 regaddr64, size64;
unsigned int psize;
/* Create our state struct */
if (bcom_sram) {
printk(KERN_ERR "%s: bcom_sram_init: "
"Already initialized !\n", owner);
return -EBUSY;
}
bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
if (!bcom_sram) {
printk(KERN_ERR "%s: bcom_sram_init: "
"Couldn't allocate internal state !\n", owner);
return -ENOMEM;
}
/* Get address and size of the sram */
regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
if (!regaddr_p) {
printk(KERN_ERR "%s: bcom_sram_init: "
"Invalid device node !\n", owner);
rv = -EINVAL;
goto error_free;
}
regaddr64 = of_translate_address(sram_node, regaddr_p);
bcom_sram->base_phys = (phys_addr_t) regaddr64;
bcom_sram->size = (unsigned int) size64;
/* Request region */
if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
printk(KERN_ERR "%s: bcom_sram_init: "
"Couldn't request region !\n", owner);
rv = -EBUSY;
goto error_free;
}
/* Map SRAM */
/* sram is not really __iomem */
bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
if (!bcom_sram->base_virt) {
printk(KERN_ERR "%s: bcom_sram_init: "
"Map error SRAM zone 0x%08lx (0x%0x)!\n",
owner, (long)bcom_sram->base_phys, bcom_sram->size );
rv = -ENOMEM;
goto error_release;
}
/* Create an rheap (defaults to 32 bits word alignment) */
bcom_sram->rh = rh_create(4);
/* Attach the free zones */
#if 0
/* Currently disabled ... for future use only */
regaddr_p = of_get_property(sram_node, "available", &psize);
#else
regaddr_p = NULL;
psize = 0;
#endif
if (!regaddr_p || !psize) {
/* Attach the whole zone */
rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
} else {
/* Attach each zone independently */
while (psize >= 2 * sizeof(u32)) {
phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
regaddr_p += 2;
psize -= 2 * sizeof(u32);
}
}
/* Init our spinlock */
spin_lock_init(&bcom_sram->lock);
return 0;
error_release:
release_mem_region(bcom_sram->base_phys, bcom_sram->size);
error_free:
kfree(bcom_sram);
bcom_sram = NULL;
return rv;
}
EXPORT_SYMBOL_GPL(bcom_sram_init);
void bcom_sram_cleanup(void)
{
/* Free resources */
if (bcom_sram) {
rh_destroy(bcom_sram->rh);
iounmap((void __iomem *)bcom_sram->base_virt);
release_mem_region(bcom_sram->base_phys, bcom_sram->size);
kfree(bcom_sram);
bcom_sram = NULL;
}
}
EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
{
unsigned long offset;
spin_lock(&bcom_sram->lock);
offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
spin_unlock(&bcom_sram->lock);
if (IS_ERR_VALUE(offset))
return NULL;
*phys = bcom_sram->base_phys + offset;
return bcom_sram->base_virt + offset;
}
EXPORT_SYMBOL_GPL(bcom_sram_alloc);
void bcom_sram_free(void *ptr)
{
unsigned long offset;
if (!ptr)
return;
offset = ptr - bcom_sram->base_virt;
spin_lock(&bcom_sram->lock);
rh_free(bcom_sram->rh, offset);
spin_unlock(&bcom_sram->lock);
}
EXPORT_SYMBOL_GPL(bcom_sram_free);
| gpl-2.0 |
VinSukhthanker/android_kernel_LENOVO_IdeaTab_A1000-G | drivers/xen/xenbus/xenbus_dev_backend.c | 4940 | 1858 | #include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/xenbus_dev.h>
#include "xenbus_comms.h"
MODULE_LICENSE("GPL");
static int xenbus_backend_open(struct inode *inode, struct file *filp)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return nonseekable_open(inode, filp);
}
static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
switch (cmd) {
case IOCTL_XENBUS_BACKEND_EVTCHN:
if (xen_store_evtchn > 0)
return xen_store_evtchn;
return -ENODEV;
default:
return -ENOTTY;
}
}
static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
return -EINVAL;
if (remap_pfn_range(vma, vma->vm_start,
virt_to_pfn(xen_store_interface),
size, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
const struct file_operations xenbus_backend_fops = {
.open = xenbus_backend_open,
.mmap = xenbus_backend_mmap,
.unlocked_ioctl = xenbus_backend_ioctl,
};
static struct miscdevice xenbus_backend_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "xen/xenbus_backend",
.fops = &xenbus_backend_fops,
};
static int __init xenbus_backend_init(void)
{
int err;
if (!xen_initial_domain())
return -ENODEV;
err = misc_register(&xenbus_backend_dev);
if (err)
printk(KERN_ERR "Could not register xenbus backend device\n");
return err;
}
static void __exit xenbus_backend_exit(void)
{
misc_deregister(&xenbus_backend_dev);
}
module_init(xenbus_backend_init);
module_exit(xenbus_backend_exit);
| gpl-2.0 |
andip71/boeffla-kernel-samsung-s5 | fs/ufs/ialloc.c | 4940 | 9394 | /*
* linux/fs/ufs/ialloc.c
*
* Copyright (c) 1998
* Daniel Pirkl <daniel.pirkl@email.cz>
* Charles University, Faculty of Mathematics and Physics
*
* from
*
* linux/fs/ext2/ialloc.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* BSD ufs-inspired inode and directory allocation by
* Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*
* UFS2 write support added by
* Evgeniy Dushistov <dushistov@mail.ru>, 2007
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
/*
* NOTE! When we get the inode, we're the only people
* that have access to it, and as such there are no
* race conditions we have to worry about. The inode
* is not on the hash-lists, and it cannot be reached
* through the filesystem because the directory entry
* has been deleted earlier.
*
* HOWEVER: we must make sure that we get no aliases,
* which means that we have to call "clear_inode()"
* _before_ we mark the inode not in use in the inode
* bitmaps. Otherwise a newly created file might use
* the same inode number (not actually the same pointer
* though), and then we'd have two inodes sharing the
* same inode number and space on the harddisk.
*/
void ufs_free_inode (struct inode * inode)
{
struct super_block * sb;
struct ufs_sb_private_info * uspi;
struct ufs_super_block_first * usb1;
struct ufs_cg_private_info * ucpi;
struct ufs_cylinder_group * ucg;
int is_directory;
unsigned ino, cg, bit;
UFSD("ENTER, ino %lu\n", inode->i_ino);
sb = inode->i_sb;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first(uspi);
ino = inode->i_ino;
lock_super (sb);
if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
unlock_super (sb);
return;
}
cg = ufs_inotocg (ino);
bit = ufs_inotocgoff (ino);
ucpi = ufs_load_cylinder (sb, cg);
if (!ucpi) {
unlock_super (sb);
return;
}
ucg = ubh_get_ucg(UCPI_UBH(ucpi));
if (!ufs_cg_chkmagic(sb, ucg))
ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");
ucg->cg_time = cpu_to_fs32(sb, get_seconds());
is_directory = S_ISDIR(inode->i_mode);
if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
else {
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
if (ino < ucpi->c_irotor)
ucpi->c_irotor = ino;
fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
uspi->cs_total.cs_nifree++;
fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);
if (is_directory) {
fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
uspi->cs_total.cs_ndir--;
fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
}
}
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
sb->s_dirt = 1;
unlock_super (sb);
UFSD("EXIT\n");
}
/*
* Nullify a new chunk of inodes.
* BSD also sets the ui_gen field of the inode during nullification,
* but we do not care about that because Linux UFS does not support NFS.
*/
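/*
 * (Concretely, the loop below zeroes the next uspi->s_fpb fragments'
 * worth of on-disk inodes, starting at cg_initediblk, and then advances
 * cg_initediblk by s_inopb.)
 */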
static void ufs2_init_inodes_chunk(struct super_block *sb,
struct ufs_cg_private_info *ucpi,
struct ufs_cylinder_group *ucg)
{
struct buffer_head *bh;
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
sector_t beg = uspi->s_sbbase +
ufs_inotofsba(ucpi->c_cgx * uspi->s_ipg +
fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk));
sector_t end = beg + uspi->s_fpb;
UFSD("ENTER cgno %d\n", ucpi->c_cgx);
for (; beg < end; ++beg) {
bh = sb_getblk(sb, beg);
lock_buffer(bh);
memset(bh->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
if (sb->s_flags & MS_SYNCHRONOUS)
sync_dirty_buffer(bh);
brelse(bh);
}
fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
UFSD("EXIT\n");
}
/*
* There are two policies for allocating an inode. If the new inode is
* a directory, then a forward search is made for a block group with both
* free space and a low directory-to-inode ratio; if that fails, the
* group with the fewest existing directories among those with
* above-average free space is chosen.
*
* For other inodes, search forward from the parent directory's block
* group to find a free inode.
*/
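/*
 * (The implementation below first tries the parent's cylinder group,
 * then a quadratic-hash probe over the remaining groups, and finally a
 * plain linear scan.)
 */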
struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
{
struct super_block * sb;
struct ufs_sb_info * sbi;
struct ufs_sb_private_info * uspi;
struct ufs_super_block_first * usb1;
struct ufs_cg_private_info * ucpi;
struct ufs_cylinder_group * ucg;
struct inode * inode;
unsigned cg, bit, i, j, start;
struct ufs_inode_info *ufsi;
int err = -ENOSPC;
UFSD("ENTER\n");
/* Cannot create files in a deleted directory */
if (!dir || !dir->i_nlink)
return ERR_PTR(-EPERM);
sb = dir->i_sb;
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
ufsi = UFS_I(inode);
sbi = UFS_SB(sb);
uspi = sbi->s_uspi;
usb1 = ubh_get_usb_first(uspi);
lock_super (sb);
/*
* Try to place the inode in its parent directory
*/
i = ufs_inotocg(dir->i_ino);
if (sbi->fs_cs(i).cs_nifree) {
cg = i;
goto cg_found;
}
/*
* Use a quadratic hash to find a group with a free inode
*/
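/*
 * (Starting from group i this probes i+1, i+3, i+7, i+15, ... modulo the
 * number of cylinder groups, doubling the stride each step.)
 */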
for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
i += j;
if (i >= uspi->s_ncg)
i -= uspi->s_ncg;
if (sbi->fs_cs(i).cs_nifree) {
cg = i;
goto cg_found;
}
}
/*
* That failed: try linear search for a free inode
*/
i = ufs_inotocg(dir->i_ino) + 1;
for (j = 2; j < uspi->s_ncg; j++) {
i++;
if (i >= uspi->s_ncg)
i = 0;
if (sbi->fs_cs(i).cs_nifree) {
cg = i;
goto cg_found;
}
}
goto failed;
cg_found:
ucpi = ufs_load_cylinder (sb, cg);
if (!ucpi) {
err = -EIO;
goto failed;
}
ucg = ubh_get_ucg(UCPI_UBH(ucpi));
if (!ufs_cg_chkmagic(sb, ucg))
ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");
start = ucpi->c_irotor;
bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
if (!(bit < uspi->s_ipg)) {
bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
if (!(bit < start)) {
ufs_error (sb, "ufs_new_inode",
"cylinder group %u corrupted - error in inode bitmap\n", cg);
err = -EIO;
goto failed;
}
}
UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
else {
ufs_panic (sb, "ufs_new_inode", "internal error");
err = -EIO;
goto failed;
}
if (uspi->fs_magic == UFS2_MAGIC) {
u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);
if (bit + uspi->s_inopb > initediblk &&
initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
ufs2_init_inodes_chunk(sb, ucpi, ucg);
}
fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
uspi->cs_total.cs_nifree--;
fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
if (S_ISDIR(mode)) {
fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
uspi->cs_total.cs_ndir++;
fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
}
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
sb->s_dirt = 1;
inode->i_ino = cg * uspi->s_ipg + bit;
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
inode->i_generation = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
ufsi->i_flags = UFS_I(dir)->i_flags;
ufsi->i_lastfrag = 0;
ufsi->i_shadow = 0;
ufsi->i_osync = 0;
ufsi->i_oeftflag = 0;
ufsi->i_dir_start_lookup = 0;
memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
insert_inode_hash(inode);
mark_inode_dirty(inode);
if (uspi->fs_magic == UFS2_MAGIC) {
struct buffer_head *bh;
struct ufs2_inode *ufs2_inode;
/*
* Set up the birth date here: there is no point holding it in
* struct ufs_inode_info and losing the full 64 bit value.
*/
bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
if (!bh) {
ufs_warning(sb, "ufs_read_inode",
"unable to read inode %lu\n",
inode->i_ino);
err = -EIO;
goto fail_remove_inode;
}
lock_buffer(bh);
ufs2_inode = (struct ufs2_inode *)bh->b_data;
ufs2_inode += ufs_inotofsbo(inode->i_ino);
ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
mark_buffer_dirty(bh);
unlock_buffer(bh);
if (sb->s_flags & MS_SYNCHRONOUS)
sync_dirty_buffer(bh);
brelse(bh);
}
unlock_super (sb);
UFSD("allocating inode %lu\n", inode->i_ino);
UFSD("EXIT\n");
return inode;
fail_remove_inode:
unlock_super(sb);
clear_nlink(inode);
iput(inode);
UFSD("EXIT (FAILED): err %d\n", err);
return ERR_PTR(err);
failed:
unlock_super (sb);
make_bad_inode(inode);
iput (inode);
UFSD("EXIT (FAILED): err %d\n", err);
return ERR_PTR(err);
}
| gpl-2.0 |
vDorst/linux | drivers/hwmon/adt7462.c | 4940 | 60232 | /*
* A hwmon driver for the Analog Devices ADT7462
* Copyright (C) 2008 IBM
*
* Author: Darrick J. Wong <djwong@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
/* ADT7462 registers */
#define ADT7462_REG_DEVICE 0x3D
#define ADT7462_REG_VENDOR 0x3E
#define ADT7462_REG_REVISION 0x3F
#define ADT7462_REG_MIN_TEMP_BASE_ADDR 0x44
#define ADT7462_REG_MIN_TEMP_MAX_ADDR 0x47
#define ADT7462_REG_MAX_TEMP_BASE_ADDR 0x48
#define ADT7462_REG_MAX_TEMP_MAX_ADDR 0x4B
#define ADT7462_REG_TEMP_BASE_ADDR 0x88
#define ADT7462_REG_TEMP_MAX_ADDR 0x8F
#define ADT7462_REG_FAN_BASE_ADDR 0x98
#define ADT7462_REG_FAN_MAX_ADDR 0x9F
#define ADT7462_REG_FAN2_BASE_ADDR 0xA2
#define ADT7462_REG_FAN2_MAX_ADDR 0xA9
#define ADT7462_REG_FAN_ENABLE 0x07
#define ADT7462_REG_FAN_MIN_BASE_ADDR 0x78
#define ADT7462_REG_FAN_MIN_MAX_ADDR 0x7F
#define ADT7462_REG_CFG2 0x02
#define ADT7462_FSPD_MASK 0x20
#define ADT7462_REG_PWM_BASE_ADDR 0xAA
#define ADT7462_REG_PWM_MAX_ADDR 0xAD
#define ADT7462_REG_PWM_MIN_BASE_ADDR 0x28
#define ADT7462_REG_PWM_MIN_MAX_ADDR 0x2B
#define ADT7462_REG_PWM_MAX 0x2C
#define ADT7462_REG_PWM_TEMP_MIN_BASE_ADDR 0x5C
#define ADT7462_REG_PWM_TEMP_MIN_MAX_ADDR 0x5F
#define ADT7462_REG_PWM_TEMP_RANGE_BASE_ADDR 0x60
#define ADT7462_REG_PWM_TEMP_RANGE_MAX_ADDR 0x63
#define ADT7462_PWM_HYST_MASK 0x0F
#define ADT7462_PWM_RANGE_MASK 0xF0
#define ADT7462_PWM_RANGE_SHIFT 4
#define ADT7462_REG_PWM_CFG_BASE_ADDR 0x21
#define ADT7462_REG_PWM_CFG_MAX_ADDR 0x24
#define ADT7462_PWM_CHANNEL_MASK 0xE0
#define ADT7462_PWM_CHANNEL_SHIFT 5
#define ADT7462_REG_PIN_CFG_BASE_ADDR 0x10
#define ADT7462_REG_PIN_CFG_MAX_ADDR 0x13
#define ADT7462_PIN7_INPUT 0x01 /* cfg0 */
#define ADT7462_DIODE3_INPUT 0x20
#define ADT7462_DIODE1_INPUT 0x40
#define ADT7462_VID_INPUT 0x80
#define ADT7462_PIN22_INPUT 0x04 /* cfg1 */
#define ADT7462_PIN21_INPUT 0x08
#define ADT7462_PIN19_INPUT 0x10
#define ADT7462_PIN15_INPUT 0x20
#define ADT7462_PIN13_INPUT 0x40
#define ADT7462_PIN8_INPUT 0x80
#define ADT7462_PIN23_MASK 0x03
#define ADT7462_PIN23_SHIFT 0
#define ADT7462_PIN26_MASK 0x0C /* cfg2 */
#define ADT7462_PIN26_SHIFT 2
#define ADT7462_PIN25_MASK 0x30
#define ADT7462_PIN25_SHIFT 4
#define ADT7462_PIN24_MASK 0xC0
#define ADT7462_PIN24_SHIFT 6
#define ADT7462_PIN26_VOLT_INPUT 0x08
#define ADT7462_PIN25_VOLT_INPUT 0x20
#define ADT7462_PIN28_SHIFT 4 /* cfg3 */
#define ADT7462_PIN28_VOLT 0x5
#define ADT7462_REG_ALARM1 0xB8
#define ADT7462_LT_ALARM 0x02
#define ADT7462_R1T_ALARM 0x04
#define ADT7462_R2T_ALARM 0x08
#define ADT7462_R3T_ALARM 0x10
#define ADT7462_REG_ALARM2 0xBB
#define ADT7462_V0_ALARM 0x01
#define ADT7462_V1_ALARM 0x02
#define ADT7462_V2_ALARM 0x04
#define ADT7462_V3_ALARM 0x08
#define ADT7462_V4_ALARM 0x10
#define ADT7462_V5_ALARM 0x20
#define ADT7462_V6_ALARM 0x40
#define ADT7462_V7_ALARM 0x80
#define ADT7462_REG_ALARM3 0xBC
#define ADT7462_V8_ALARM 0x08
#define ADT7462_V9_ALARM 0x10
#define ADT7462_V10_ALARM 0x20
#define ADT7462_V11_ALARM 0x40
#define ADT7462_V12_ALARM 0x80
#define ADT7462_REG_ALARM4 0xBD
#define ADT7462_F0_ALARM 0x01
#define ADT7462_F1_ALARM 0x02
#define ADT7462_F2_ALARM 0x04
#define ADT7462_F3_ALARM 0x08
#define ADT7462_F4_ALARM 0x10
#define ADT7462_F5_ALARM 0x20
#define ADT7462_F6_ALARM 0x40
#define ADT7462_F7_ALARM 0x80
#define ADT7462_ALARM1 0x0000
#define ADT7462_ALARM2 0x0100
#define ADT7462_ALARM3 0x0200
#define ADT7462_ALARM4 0x0300
#define ADT7462_ALARM_REG_SHIFT 8
#define ADT7462_ALARM_FLAG_MASK 0xFF
#define ADT7462_TEMP_COUNT 4
#define ADT7462_TEMP_REG(x) (ADT7462_REG_TEMP_BASE_ADDR + ((x) * 2))
#define ADT7462_TEMP_MIN_REG(x) (ADT7462_REG_MIN_TEMP_BASE_ADDR + (x))
#define ADT7462_TEMP_MAX_REG(x) (ADT7462_REG_MAX_TEMP_BASE_ADDR + (x))
#define TEMP_FRAC_OFFSET 6
#define ADT7462_FAN_COUNT 8
#define ADT7462_REG_FAN_MIN(x) (ADT7462_REG_FAN_MIN_BASE_ADDR + (x))
#define ADT7462_PWM_COUNT 4
#define ADT7462_REG_PWM(x) (ADT7462_REG_PWM_BASE_ADDR + (x))
#define ADT7462_REG_PWM_MIN(x) (ADT7462_REG_PWM_MIN_BASE_ADDR + (x))
#define ADT7462_REG_PWM_TMIN(x) \
(ADT7462_REG_PWM_TEMP_MIN_BASE_ADDR + (x))
#define ADT7462_REG_PWM_TRANGE(x) \
(ADT7462_REG_PWM_TEMP_RANGE_BASE_ADDR + (x))
#define ADT7462_PIN_CFG_REG_COUNT 4
#define ADT7462_REG_PIN_CFG(x) (ADT7462_REG_PIN_CFG_BASE_ADDR + (x))
#define ADT7462_REG_PWM_CFG(x) (ADT7462_REG_PWM_CFG_BASE_ADDR + (x))
#define ADT7462_ALARM_REG_COUNT 4
/*
* The chip can measure 13 different voltage sources:
*
* 1. +12V1 (pin 7)
* 2. Vccp1/+2.5V/+1.8V/+1.5V (pin 23)
* 3. +12V3 (pin 22)
* 4. +5V (pin 21)
* 5. +1.25V/+0.9V (pin 19)
* 6. +2.5V/+1.8V (pin 15)
* 7. +3.3V (pin 13)
* 8. +12V2 (pin 8)
* 9. Vbatt/FSB_Vtt (pin 26)
* A. +3.3V/+1.2V1 (pin 25)
* B. Vccp2/+2.5V/+1.8V/+1.5V (pin 24)
* C. +1.5V ICH (only if BOTH pin 28/29 are set to +1.5V)
* D. +1.5V 3GPIO (only if BOTH pin 28/29 are set to +1.5V)
*
* Each of these 13 has a factor to convert raw to voltage. Even better,
* the pins can be connected to other sensors (tach/gpio/hot/etc), which
* makes the bookkeeping tricky.
*
* Some, but not all, of these voltages have low/high limits.
*/
#define ADT7462_VOLT_COUNT 13
#define ADT7462_VENDOR 0x41
#define ADT7462_DEVICE 0x62
/* datasheet only mentions a revision 4 */
#define ADT7462_REVISION 0x04
/* How often do we reread sensors values? (In jiffies) */
#define SENSOR_REFRESH_INTERVAL (2 * HZ)
/* How often do we reread sensor limit values? (In jiffies) */
#define LIMIT_REFRESH_INTERVAL (60 * HZ)
/* datasheet says to divide this number by the fan reading to get fan rpm */
#define FAN_PERIOD_TO_RPM(x) ((90000 * 60) / (x))
#define FAN_RPM_TO_PERIOD FAN_PERIOD_TO_RPM
#define FAN_PERIOD_INVALID 65535
#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
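/*
* Worked example of the conversion above: a tach period reading of 5400
* corresponds to (90000 * 60) / 5400 = 1000 RPM. The formula is its own
* inverse, which is why FAN_RPM_TO_PERIOD simply aliases FAN_PERIOD_TO_RPM.
*/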
#define MASK_AND_SHIFT(value, prefix) \
(((value) & prefix##_MASK) >> prefix##_SHIFT)
struct adt7462_data {
struct device *hwmon_dev;
struct attribute_group attrs;
struct mutex lock;
char sensors_valid;
char limits_valid;
unsigned long sensors_last_updated; /* In jiffies */
unsigned long limits_last_updated; /* In jiffies */
u8 temp[ADT7462_TEMP_COUNT];
/* bits 6-7 are quarter pieces of temp */
u8 temp_frac[ADT7462_TEMP_COUNT];
u8 temp_min[ADT7462_TEMP_COUNT];
u8 temp_max[ADT7462_TEMP_COUNT];
u16 fan[ADT7462_FAN_COUNT];
u8 fan_enabled;
u8 fan_min[ADT7462_FAN_COUNT];
u8 cfg2;
u8 pwm[ADT7462_PWM_COUNT];
u8 pin_cfg[ADT7462_PIN_CFG_REG_COUNT];
u8 voltages[ADT7462_VOLT_COUNT];
u8 volt_max[ADT7462_VOLT_COUNT];
u8 volt_min[ADT7462_VOLT_COUNT];
u8 pwm_min[ADT7462_PWM_COUNT];
u8 pwm_tmin[ADT7462_PWM_COUNT];
u8 pwm_trange[ADT7462_PWM_COUNT];
u8 pwm_max; /* only one per chip */
u8 pwm_cfg[ADT7462_PWM_COUNT];
u8 alarms[ADT7462_ALARM_REG_COUNT];
};
static int adt7462_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int adt7462_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adt7462_remove(struct i2c_client *client);
static const struct i2c_device_id adt7462_id[] = {
{ "adt7462", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7462_id);
static struct i2c_driver adt7462_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "adt7462",
},
.probe = adt7462_probe,
.remove = adt7462_remove,
.id_table = adt7462_id,
.detect = adt7462_detect,
.address_list = normal_i2c,
};
/*
* 16-bit registers on the ADT7462 are low-byte first. The data sheet says
* that the low byte must be read before the high byte.
*/
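/*
* For example, a low-byte read of 0x34 followed by a high-byte read of
* 0x12 combines to 0x1234. The only 16-bit quantities read this way are
* the fan tach values fetched in adt7462_update_device().
*/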
static inline int adt7462_read_word_data(struct i2c_client *client, u8 reg)
{
u16 foo;
foo = i2c_smbus_read_byte_data(client, reg);
foo |= ((u16)i2c_smbus_read_byte_data(client, reg + 1) << 8);
return foo;
}
/* For some reason these registers are not contiguous. */
static int ADT7462_REG_FAN(int fan)
{
if (fan < 4)
return ADT7462_REG_FAN_BASE_ADDR + (2 * fan);
return ADT7462_REG_FAN2_BASE_ADDR + (2 * (fan - 4));
}
/* Voltage registers are scattered everywhere */
static int ADT7462_REG_VOLT_MAX(struct adt7462_data *data, int which)
{
switch (which) {
case 0:
if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
return 0x7C;
break;
case 1:
return 0x69;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return 0x7F;
break;
case 3:
if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
return 0x7E;
break;
case 4:
if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT))
return 0x4B;
break;
case 5:
if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT))
return 0x49;
break;
case 6:
if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
return 0x68;
break;
case 7:
if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
return 0x7D;
break;
case 8:
if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT))
return 0x6C;
break;
case 9:
if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT))
return 0x6B;
break;
case 10:
return 0x6A;
case 11:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return 0x50;
break;
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return 0x4C;
break;
}
return 0;
}
static int ADT7462_REG_VOLT_MIN(struct adt7462_data *data, int which)
{
switch (which) {
case 0:
if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
return 0x6D;
break;
case 1:
return 0x72;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return 0x6F;
break;
case 3:
if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
return 0x71;
break;
case 4:
if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT))
return 0x47;
break;
case 5:
if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT))
return 0x45;
break;
case 6:
if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
return 0x70;
break;
case 7:
if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
return 0x6E;
break;
case 8:
if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT))
return 0x75;
break;
case 9:
if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT))
return 0x74;
break;
case 10:
return 0x73;
case 11:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return 0x76;
break;
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return 0x77;
break;
}
return 0;
}
static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
{
switch (which) {
case 0:
if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
return 0xA3;
break;
case 1:
return 0x90;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return 0xA9;
break;
case 3:
if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
return 0xA7;
break;
case 4:
if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT))
return 0x8F;
break;
case 5:
if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT))
return 0x8B;
break;
case 6:
if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
return 0x96;
break;
case 7:
if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
return 0xA5;
break;
case 8:
if (!(data->pin_cfg[2] & ADT7462_PIN26_VOLT_INPUT))
return 0x93;
break;
case 9:
if (!(data->pin_cfg[2] & ADT7462_PIN25_VOLT_INPUT))
return 0x92;
break;
case 10:
return 0x91;
case 11:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return 0x94;
break;
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return 0x95;
break;
}
return 0;
}
/* Provide labels for sysfs */
static const char *voltage_label(struct adt7462_data *data, int which)
{
switch (which) {
case 0:
if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
return "+12V1";
break;
case 1:
switch (MASK_AND_SHIFT(data->pin_cfg[1], ADT7462_PIN23)) {
case 0:
return "Vccp1";
case 1:
return "+2.5V";
case 2:
return "+1.8V";
case 3:
return "+1.5V";
}
break;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return "+12V3";
break;
case 3:
if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
return "+5V";
break;
case 4:
if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) {
if (data->pin_cfg[1] & ADT7462_PIN19_INPUT)
return "+0.9V";
return "+1.25V";
}
break;
case 5:
if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) {
if (data->pin_cfg[1] & ADT7462_PIN19_INPUT)
return "+1.8V";
return "+2.5V";
}
break;
case 6:
if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
return "+3.3V";
break;
case 7:
if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
return "+12V2";
break;
case 8:
switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN26)) {
case 0:
return "Vbatt";
case 1:
return "FSB_Vtt";
}
break;
case 9:
switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN25)) {
case 0:
return "+3.3V";
case 1:
return "+1.2V1";
}
break;
case 10:
switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN24)) {
case 0:
return "Vccp2";
case 1:
return "+2.5V";
case 2:
return "+1.8V";
case 3:
return "+1.5";
}
case 11:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return "+1.5V ICH";
break;
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return "+1.5V 3GPIO";
break;
}
return "N/A";
}
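/*
* "N/A" is reported for any channel whose pin is currently configured for
* something other than voltage measurement (tach, GPIO, VID and so on), so
* every inX_label attribute exists but only carries a real name when the
* corresponding reading is meaningful.
*/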
/* Multipliers are actually in uV, not mV. */
static int voltage_multiplier(struct adt7462_data *data, int which)
{
switch (which) {
case 0:
if (!(data->pin_cfg[0] & ADT7462_PIN7_INPUT))
return 62500;
break;
case 1:
switch (MASK_AND_SHIFT(data->pin_cfg[1], ADT7462_PIN23)) {
case 0:
if (data->pin_cfg[0] & ADT7462_VID_INPUT)
return 12500;
return 6250;
case 1:
return 13000;
case 2:
return 9400;
case 3:
return 7800;
}
break;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return 62500;
break;
case 3:
if (!(data->pin_cfg[1] & ADT7462_PIN21_INPUT))
return 26000;
break;
case 4:
if (!(data->pin_cfg[0] & ADT7462_DIODE3_INPUT)) {
if (data->pin_cfg[1] & ADT7462_PIN19_INPUT)
return 4690;
return 6500;
}
break;
case 5:
if (!(data->pin_cfg[0] & ADT7462_DIODE1_INPUT)) {
if (data->pin_cfg[1] & ADT7462_PIN15_INPUT)
return 9400;
return 13000;
}
break;
case 6:
if (!(data->pin_cfg[1] & ADT7462_PIN13_INPUT))
return 17200;
break;
case 7:
if (!(data->pin_cfg[1] & ADT7462_PIN8_INPUT))
return 62500;
break;
case 8:
switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN26)) {
case 0:
return 15600;
case 1:
return 6250;
}
break;
case 9:
switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN25)) {
case 0:
return 17200;
case 1:
return 6250;
}
break;
case 10:
switch (MASK_AND_SHIFT(data->pin_cfg[2], ADT7462_PIN24)) {
case 0:
return 6250;
case 1:
return 13000;
case 2:
return 9400;
case 3:
return 7800;
}
break;
case 11:
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
!(data->pin_cfg[0] & ADT7462_VID_INPUT))
return 7800;
}
return 0;
}
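/*
* Putting the multipliers to use: the raw 8-bit reading is scaled by the
* per-channel factor in microvolts and divided by 1000 to get millivolts
* for sysfs. For the +5V input (channel 3, 26000 uV per LSB), a raw value
* of 192 is reported as 192 * 26000 / 1000 = 4992 mV.
*/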
static int temp_enabled(struct adt7462_data *data, int which)
{
switch (which) {
case 0:
case 2:
return 1;
case 1:
if (data->pin_cfg[0] & ADT7462_DIODE1_INPUT)
return 1;
break;
case 3:
if (data->pin_cfg[0] & ADT7462_DIODE3_INPUT)
return 1;
break;
}
return 0;
}
static const char *temp_label(struct adt7462_data *data, int which)
{
switch (which) {
case 0:
return "local";
case 1:
if (data->pin_cfg[0] & ADT7462_DIODE1_INPUT)
return "remote1";
break;
case 2:
return "remote2";
case 3:
if (data->pin_cfg[0] & ADT7462_DIODE3_INPUT)
return "remote3";
break;
}
return "N/A";
}
/* Map Trange register values to mC */
#define NUM_TRANGE_VALUES 16
static const int trange_values[NUM_TRANGE_VALUES] = {
2000,
2500,
3300,
4000,
5000,
6700,
8000,
10000,
13300,
16000,
20000,
26700,
32000,
40000,
53300,
80000
};
static int find_trange_value(int trange)
{
int i;
for (i = 0; i < NUM_TRANGE_VALUES; i++)
if (trange_values[i] == trange)
return i;
return -ENODEV;
}
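/*
* The TRANGE registers pack two fields: the upper nibble indexes the
* trange_values[] table above and the lower nibble is the hysteresis in
* degrees C. A register value of 0x74, for example, decodes to a 10000
* millidegree range (trange_values[7]) with 4000 millidegrees of
* hysteresis, which is how show_pwm_tmax() and show_pwm_hyst() unpack it.
*/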
static struct adt7462_data *adt7462_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
unsigned long local_jiffies = jiffies;
int i;
mutex_lock(&data->lock);
if (time_before(local_jiffies, data->sensors_last_updated +
SENSOR_REFRESH_INTERVAL)
&& data->sensors_valid)
goto no_sensor_update;
for (i = 0; i < ADT7462_TEMP_COUNT; i++) {
/*
* Reading the fractional register locks the integral
* register until both have been read.
*/
data->temp_frac[i] = i2c_smbus_read_byte_data(client,
ADT7462_TEMP_REG(i));
data->temp[i] = i2c_smbus_read_byte_data(client,
ADT7462_TEMP_REG(i) + 1);
}
for (i = 0; i < ADT7462_FAN_COUNT; i++)
data->fan[i] = adt7462_read_word_data(client,
ADT7462_REG_FAN(i));
data->fan_enabled = i2c_smbus_read_byte_data(client,
ADT7462_REG_FAN_ENABLE);
for (i = 0; i < ADT7462_PWM_COUNT; i++)
data->pwm[i] = i2c_smbus_read_byte_data(client,
ADT7462_REG_PWM(i));
for (i = 0; i < ADT7462_PIN_CFG_REG_COUNT; i++)
data->pin_cfg[i] = i2c_smbus_read_byte_data(client,
ADT7462_REG_PIN_CFG(i));
for (i = 0; i < ADT7462_VOLT_COUNT; i++) {
int reg = ADT7462_REG_VOLT(data, i);
if (!reg)
data->voltages[i] = 0;
else
data->voltages[i] = i2c_smbus_read_byte_data(client,
reg);
}
data->alarms[0] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM1);
data->alarms[1] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM2);
data->alarms[2] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM3);
data->alarms[3] = i2c_smbus_read_byte_data(client, ADT7462_REG_ALARM4);
data->sensors_last_updated = local_jiffies;
data->sensors_valid = 1;
no_sensor_update:
if (time_before(local_jiffies, data->limits_last_updated +
LIMIT_REFRESH_INTERVAL)
&& data->limits_valid)
goto out;
for (i = 0; i < ADT7462_TEMP_COUNT; i++) {
data->temp_min[i] = i2c_smbus_read_byte_data(client,
ADT7462_TEMP_MIN_REG(i));
data->temp_max[i] = i2c_smbus_read_byte_data(client,
ADT7462_TEMP_MAX_REG(i));
}
for (i = 0; i < ADT7462_FAN_COUNT; i++)
data->fan_min[i] = i2c_smbus_read_byte_data(client,
ADT7462_REG_FAN_MIN(i));
for (i = 0; i < ADT7462_VOLT_COUNT; i++) {
int reg = ADT7462_REG_VOLT_MAX(data, i);
data->volt_max[i] =
(reg ? i2c_smbus_read_byte_data(client, reg) : 0);
reg = ADT7462_REG_VOLT_MIN(data, i);
data->volt_min[i] =
(reg ? i2c_smbus_read_byte_data(client, reg) : 0);
}
for (i = 0; i < ADT7462_PWM_COUNT; i++) {
data->pwm_min[i] = i2c_smbus_read_byte_data(client,
ADT7462_REG_PWM_MIN(i));
data->pwm_tmin[i] = i2c_smbus_read_byte_data(client,
ADT7462_REG_PWM_TMIN(i));
data->pwm_trange[i] = i2c_smbus_read_byte_data(client,
ADT7462_REG_PWM_TRANGE(i));
data->pwm_cfg[i] = i2c_smbus_read_byte_data(client,
ADT7462_REG_PWM_CFG(i));
}
data->pwm_max = i2c_smbus_read_byte_data(client, ADT7462_REG_PWM_MAX);
data->cfg2 = i2c_smbus_read_byte_data(client, ADT7462_REG_CFG2);
data->limits_last_updated = local_jiffies;
data->limits_valid = 1;
out:
mutex_unlock(&data->lock);
return data;
}
static ssize_t show_temp_min(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
if (!temp_enabled(data, attr->index))
return sprintf(buf, "0\n");
return sprintf(buf, "%d\n", 1000 * (data->temp_min[attr->index] - 64));
}
static ssize_t set_temp_min(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_TEMP_MIN_REG(attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_temp_max(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
if (!temp_enabled(data, attr->index))
return sprintf(buf, "0\n");
return sprintf(buf, "%d\n", 1000 * (data->temp_max[attr->index] - 64));
}
static ssize_t set_temp_max(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_TEMP_MAX_REG(attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
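/*
* Temperatures are stored with a 64 degree offset, plus a two-bit fraction
* in bits 7:6 of the fractional register where each step is a quarter
* degree. A raw value of 89 with a fractional register of 0x80 is thus
* reported as (89 - 64) * 1000 + 2 * 250 = 25500 millidegrees.
*/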
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
u8 frac = data->temp_frac[attr->index] >> TEMP_FRAC_OFFSET;
if (!temp_enabled(data, attr->index))
return sprintf(buf, "0\n");
return sprintf(buf, "%d\n", 1000 * (data->temp[attr->index] - 64) +
250 * frac);
}
static ssize_t show_temp_label(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%s\n", temp_label(data, attr->index));
}
static ssize_t show_volt_max(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
int x = voltage_multiplier(data, attr->index);
x *= data->volt_max[attr->index];
x /= 1000; /* convert from uV to mV */
return sprintf(buf, "%d\n", x);
}
static ssize_t set_volt_max(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
int x = voltage_multiplier(data, attr->index);
long temp;
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_max[attr->index] = temp;
i2c_smbus_write_byte_data(client,
ADT7462_REG_VOLT_MAX(data, attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_volt_min(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
int x = voltage_multiplier(data, attr->index);
x *= data->volt_min[attr->index];
x /= 1000; /* convert from uV to mV */
return sprintf(buf, "%d\n", x);
}
static ssize_t set_volt_min(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
int x = voltage_multiplier(data, attr->index);
long temp;
if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_min[attr->index] = temp;
i2c_smbus_write_byte_data(client,
ADT7462_REG_VOLT_MIN(data, attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_voltage(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
int x = voltage_multiplier(data, attr->index);
x *= data->voltages[attr->index];
x /= 1000; /* convert from uV to mV */
return sprintf(buf, "%d\n", x);
}
static ssize_t show_voltage_label(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%s\n", voltage_label(data, attr->index));
}
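/*
* Alarm attributes encode both the alarm register and the flag bit in
* attr->index: the register index sits above ADT7462_ALARM_REG_SHIFT and
* the flag bit occupies the low byte. fan3_alarm, for instance, uses
* ADT7462_ALARM4 | ADT7462_F2_ALARM = 0x0304, so show_alarm() tests bit
* 0x04 of data->alarms[3].
*/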
static ssize_t show_alarm(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
int reg = attr->index >> ADT7462_ALARM_REG_SHIFT;
int mask = attr->index & ADT7462_ALARM_FLAG_MASK;
if (data->alarms[reg] & mask)
return sprintf(buf, "1\n");
else
return sprintf(buf, "0\n");
}
static int fan_enabled(struct adt7462_data *data, int fan)
{
return data->fan_enabled & (1 << fan);
}
static ssize_t show_fan_min(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
u16 temp;
/* Only the MSB of the min fan period is stored... */
temp = data->fan_min[attr->index];
temp <<= 8;
if (!fan_enabled(data, attr->index) ||
!FAN_DATA_VALID(temp))
return sprintf(buf, "0\n");
return sprintf(buf, "%d\n", FAN_PERIOD_TO_RPM(temp));
}
static ssize_t set_fan_min(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp) || !temp ||
!fan_enabled(data, attr->index))
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
temp >>= 8;
temp = SENSORS_LIMIT(temp, 1, 255);
mutex_lock(&data->lock);
data->fan_min[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_FAN_MIN(attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
if (!fan_enabled(data, attr->index) ||
!FAN_DATA_VALID(data->fan[attr->index]))
return sprintf(buf, "0\n");
return sprintf(buf, "%d\n",
FAN_PERIOD_TO_RPM(data->fan[attr->index]));
}
static ssize_t show_force_pwm_max(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%d\n", (data->cfg2 & ADT7462_FSPD_MASK ? 1 : 0));
}
static ssize_t set_force_pwm_max(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
u8 reg;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
mutex_lock(&data->lock);
reg = i2c_smbus_read_byte_data(client, ADT7462_REG_CFG2);
if (temp)
reg |= ADT7462_FSPD_MASK;
else
reg &= ~ADT7462_FSPD_MASK;
data->cfg2 = reg;
i2c_smbus_write_byte_data(client, ADT7462_REG_CFG2, reg);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%d\n", data->pwm[attr->index]);
}
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_PWM(attr->index), temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_pwm_max(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%d\n", data->pwm_max);
}
static ssize_t set_pwm_max(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_max = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_MAX, temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_pwm_min(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%d\n", data->pwm_min[attr->index]);
}
static ssize_t set_pwm_min(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_min[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_MIN(attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_pwm_hyst(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%d\n", 1000 *
(data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK));
}
static ssize_t set_pwm_hyst(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
temp = SENSORS_LIMIT(temp, 0, 15);
/* package things up */
temp &= ADT7462_PWM_HYST_MASK;
temp |= data->pwm_trange[attr->index] & ADT7462_PWM_RANGE_MASK;
mutex_lock(&data->lock);
data->pwm_trange[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TRANGE(attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_pwm_tmax(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
/* tmax = tmin + trange */
int trange = trange_values[data->pwm_trange[attr->index] >>
ADT7462_PWM_RANGE_SHIFT];
int tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
return sprintf(buf, "%d\n", tmin + trange);
}
static ssize_t set_pwm_tmax(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
int temp;
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
int tmin, trange_value;
long trange;
if (kstrtol(buf, 10, &trange))
return -EINVAL;
/* trange = tmax - tmin */
tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
trange_value = find_trange_value(trange - tmin);
if (trange_value < 0)
return -EINVAL;
temp = trange_value << ADT7462_PWM_RANGE_SHIFT;
temp |= data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK;
mutex_lock(&data->lock);
data->pwm_trange[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TRANGE(attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_pwm_tmin(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
return sprintf(buf, "%d\n", 1000 * (data->pwm_tmin[attr->index] - 64));
}
static ssize_t set_pwm_tmin(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
temp = SENSORS_LIMIT(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_TMIN(attr->index),
temp);
mutex_unlock(&data->lock);
return count;
}
static ssize_t show_pwm_auto(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
int cfg = data->pwm_cfg[attr->index] >> ADT7462_PWM_CHANNEL_SHIFT;
switch (cfg) {
case 4: /* off */
return sprintf(buf, "0\n");
case 7: /* manual */
return sprintf(buf, "1\n");
default: /* automatic */
return sprintf(buf, "2\n");
}
}
static void set_pwm_channel(struct i2c_client *client,
struct adt7462_data *data,
int which,
int value)
{
int temp = data->pwm_cfg[which] & ~ADT7462_PWM_CHANNEL_MASK;
temp |= value << ADT7462_PWM_CHANNEL_SHIFT;
mutex_lock(&data->lock);
data->pwm_cfg[which] = temp;
i2c_smbus_write_byte_data(client, ADT7462_REG_PWM_CFG(which), temp);
mutex_unlock(&data->lock);
}
static ssize_t set_pwm_auto(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
switch (temp) {
case 0: /* off */
set_pwm_channel(client, data, attr->index, 4);
return count;
case 1: /* manual */
set_pwm_channel(client, data, attr->index, 7);
return count;
default:
return -EINVAL;
}
}
static ssize_t show_pwm_auto_temp(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adt7462_data *data = adt7462_update_device(dev);
int channel = data->pwm_cfg[attr->index] >> ADT7462_PWM_CHANNEL_SHIFT;
switch (channel) {
case 0: /* temp[1234] only */
case 1:
case 2:
case 3:
return sprintf(buf, "%d\n", (1 << channel));
case 5: /* temp1 & temp4 */
return sprintf(buf, "9\n");
case 6:
return sprintf(buf, "15\n");
default:
return sprintf(buf, "0\n");
}
}
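/*
* cvt_auto_temp() below is the inverse of the mapping used by
* show_pwm_auto_temp(): a sysfs write of 15 (all four temp channels)
* selects channel code 6, a write of 9 (temp1 and temp4) selects 5, and a
* single power-of-two bit such as 2 selects ilog2(2) = 1, i.e. temp2.
*/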
static int cvt_auto_temp(int input)
{
if (input == 0xF)
return 6;
if (input == 0x9)
return 5;
if (input < 1 || !is_power_of_2(input))
return -EINVAL;
return ilog2(input);
}
static ssize_t set_pwm_auto_temp(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = cvt_auto_temp(temp);
if (temp < 0)
return temp;
set_pwm_channel(client, data, attr->index, temp);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 2);
static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 3);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
set_temp_min, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
set_temp_min, 1);
static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_temp_min,
set_temp_min, 2);
static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_temp_min,
set_temp_min, 3);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM1 | ADT7462_LT_ALARM);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM1 | ADT7462_R1T_ALARM);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM1 | ADT7462_R2T_ALARM);
static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM1 | ADT7462_R3T_ALARM);
static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 0);
static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 1);
static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 2);
static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 3);
static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 4);
static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 5);
static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 6);
static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 7);
static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 8);
static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 9);
static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 10);
static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 11);
static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO, show_volt_max,
set_volt_max, 12);
static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 0);
static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 1);
static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 2);
static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 3);
static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 4);
static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 5);
static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 6);
static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 7);
static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 8);
static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 9);
static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 10);
static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 11);
static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO, show_volt_min,
set_volt_min, 12);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 3);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 4);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 5);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 6);
static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_voltage, NULL, 7);
static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_voltage, NULL, 8);
static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_voltage, NULL, 9);
static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_voltage, NULL, 10);
static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_voltage, NULL, 11);
static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_voltage, NULL, 12);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_voltage_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_voltage_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_voltage_label, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_voltage_label, NULL, 3);
static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_voltage_label, NULL, 4);
static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_voltage_label, NULL, 5);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_voltage_label, NULL, 6);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_voltage_label, NULL, 7);
static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_voltage_label, NULL, 8);
static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, show_voltage_label, NULL, 9);
static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, show_voltage_label, NULL, 10);
static SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, show_voltage_label, NULL, 11);
static SENSOR_DEVICE_ATTR(in13_label, S_IRUGO, show_voltage_label, NULL, 12);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V0_ALARM);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V7_ALARM);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V2_ALARM);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V6_ALARM);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V5_ALARM);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V4_ALARM);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V3_ALARM);
static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM2 | ADT7462_V1_ALARM);
static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM3 | ADT7462_V10_ALARM);
static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM3 | ADT7462_V9_ALARM);
static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM3 | ADT7462_V8_ALARM);
static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM3 | ADT7462_V11_ALARM);
static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM3 | ADT7462_V12_ALARM);
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 0);
static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 2);
static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 3);
static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 4);
static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 5);
static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 6);
static SENSOR_DEVICE_ATTR(fan8_min, S_IWUSR | S_IRUGO, show_fan_min,
set_fan_min, 7);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4);
static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan, NULL, 5);
static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan, NULL, 6);
static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan, NULL, 7);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F0_ALARM);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F1_ALARM);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F2_ALARM);
static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F3_ALARM);
static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F4_ALARM);
static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F5_ALARM);
static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F6_ALARM);
static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_alarm, NULL,
ADT7462_ALARM4 | ADT7462_F7_ALARM);
static SENSOR_DEVICE_ATTR(force_pwm_max, S_IWUSR | S_IRUGO,
show_force_pwm_max, set_force_pwm_max, 0);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm_min, set_pwm_min, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm_min, set_pwm_min, 1);
static SENSOR_DEVICE_ATTR(pwm3_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm_min, set_pwm_min, 2);
static SENSOR_DEVICE_ATTR(pwm4_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm_min, set_pwm_min, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
show_pwm_max, set_pwm_max, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_point2_pwm, S_IWUSR | S_IRUGO,
show_pwm_max, set_pwm_max, 1);
static SENSOR_DEVICE_ATTR(pwm3_auto_point2_pwm, S_IWUSR | S_IRUGO,
show_pwm_max, set_pwm_max, 2);
static SENSOR_DEVICE_ATTR(pwm4_auto_point2_pwm, S_IWUSR | S_IRUGO,
show_pwm_max, set_pwm_max, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point1_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point1_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point1_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point1_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point2_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point2_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point2_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point2_hyst, S_IWUSR | S_IRUGO,
show_pwm_hyst, set_pwm_hyst, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IWUSR | S_IRUGO,
show_pwm_tmin, set_pwm_tmin, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
show_pwm_tmin, set_pwm_tmin, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point1_temp, S_IWUSR | S_IRUGO,
show_pwm_tmin, set_pwm_tmin, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point1_temp, S_IWUSR | S_IRUGO,
show_pwm_tmin, set_pwm_tmin, 3);
static SENSOR_DEVICE_ATTR(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
show_pwm_tmax, set_pwm_tmax, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
show_pwm_tmax, set_pwm_tmax, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_point2_temp, S_IWUSR | S_IRUGO,
show_pwm_tmax, set_pwm_tmax, 2);
static SENSOR_DEVICE_ATTR(temp4_auto_point2_temp, S_IWUSR | S_IRUGO,
show_pwm_tmax, set_pwm_tmax, 3);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
set_pwm_auto, 0);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
set_pwm_auto, 1);
static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
set_pwm_auto, 2);
static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_auto,
set_pwm_auto, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IWUSR | S_IRUGO,
show_pwm_auto_temp, set_pwm_auto_temp, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IWUSR | S_IRUGO,
show_pwm_auto_temp, set_pwm_auto_temp, 1);
static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IWUSR | S_IRUGO,
show_pwm_auto_temp, set_pwm_auto_temp, 2);
static SENSOR_DEVICE_ATTR(pwm4_auto_channels_temp, S_IWUSR | S_IRUGO,
show_pwm_auto_temp, set_pwm_auto_temp, 3);
static struct attribute *adt7462_attr[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp4_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp4_min.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_temp1_label.dev_attr.attr,
&sensor_dev_attr_temp2_label.dev_attr.attr,
&sensor_dev_attr_temp3_label.dev_attr.attr,
&sensor_dev_attr_temp4_label.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
&sensor_dev_attr_temp4_alarm.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in8_max.dev_attr.attr,
&sensor_dev_attr_in9_max.dev_attr.attr,
&sensor_dev_attr_in10_max.dev_attr.attr,
&sensor_dev_attr_in11_max.dev_attr.attr,
&sensor_dev_attr_in12_max.dev_attr.attr,
&sensor_dev_attr_in13_max.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in6_min.dev_attr.attr,
&sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_in8_min.dev_attr.attr,
&sensor_dev_attr_in9_min.dev_attr.attr,
&sensor_dev_attr_in10_min.dev_attr.attr,
&sensor_dev_attr_in11_min.dev_attr.attr,
&sensor_dev_attr_in12_min.dev_attr.attr,
&sensor_dev_attr_in13_min.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
&sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_in9_input.dev_attr.attr,
&sensor_dev_attr_in10_input.dev_attr.attr,
&sensor_dev_attr_in11_input.dev_attr.attr,
&sensor_dev_attr_in12_input.dev_attr.attr,
&sensor_dev_attr_in13_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
&sensor_dev_attr_in2_label.dev_attr.attr,
&sensor_dev_attr_in3_label.dev_attr.attr,
&sensor_dev_attr_in4_label.dev_attr.attr,
&sensor_dev_attr_in5_label.dev_attr.attr,
&sensor_dev_attr_in6_label.dev_attr.attr,
&sensor_dev_attr_in7_label.dev_attr.attr,
&sensor_dev_attr_in8_label.dev_attr.attr,
&sensor_dev_attr_in9_label.dev_attr.attr,
&sensor_dev_attr_in10_label.dev_attr.attr,
&sensor_dev_attr_in11_label.dev_attr.attr,
&sensor_dev_attr_in12_label.dev_attr.attr,
&sensor_dev_attr_in13_label.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
&sensor_dev_attr_in7_alarm.dev_attr.attr,
&sensor_dev_attr_in8_alarm.dev_attr.attr,
&sensor_dev_attr_in9_alarm.dev_attr.attr,
&sensor_dev_attr_in10_alarm.dev_attr.attr,
&sensor_dev_attr_in11_alarm.dev_attr.attr,
&sensor_dev_attr_in12_alarm.dev_attr.attr,
&sensor_dev_attr_in13_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan6_min.dev_attr.attr,
&sensor_dev_attr_fan7_min.dev_attr.attr,
&sensor_dev_attr_fan8_min.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan4_input.dev_attr.attr,
&sensor_dev_attr_fan5_input.dev_attr.attr,
&sensor_dev_attr_fan6_input.dev_attr.attr,
&sensor_dev_attr_fan7_input.dev_attr.attr,
&sensor_dev_attr_fan8_input.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
&sensor_dev_attr_fan6_alarm.dev_attr.attr,
&sensor_dev_attr_fan7_alarm.dev_attr.attr,
&sensor_dev_attr_fan8_alarm.dev_attr.attr,
&sensor_dev_attr_force_pwm_max.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
&sensor_dev_attr_pwm4.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm4_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm4_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point1_hyst.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point1_hyst.dev_attr.attr,
&sensor_dev_attr_temp4_auto_point1_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point2_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point2_hyst.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point2_hyst.dev_attr.attr,
&sensor_dev_attr_temp4_auto_point2_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp4_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp4_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm2_enable.dev_attr.attr,
&sensor_dev_attr_pwm3_enable.dev_attr.attr,
&sensor_dev_attr_pwm4_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
&sensor_dev_attr_pwm4_auto_channels_temp.dev_attr.attr,
NULL
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int adt7462_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int vendor, device, revision;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
vendor = i2c_smbus_read_byte_data(client, ADT7462_REG_VENDOR);
if (vendor != ADT7462_VENDOR)
return -ENODEV;
device = i2c_smbus_read_byte_data(client, ADT7462_REG_DEVICE);
if (device != ADT7462_DEVICE)
return -ENODEV;
revision = i2c_smbus_read_byte_data(client, ADT7462_REG_REVISION);
if (revision != ADT7462_REVISION)
return -ENODEV;
strlcpy(info->type, "adt7462", I2C_NAME_SIZE);
return 0;
}
static int adt7462_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adt7462_data *data;
int err;
data = kzalloc(sizeof(struct adt7462_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
dev_info(&client->dev, "%s chip found\n", client->name);
/* Register sysfs hooks */
data->attrs.attrs = adt7462_attr;
err = sysfs_create_group(&client->dev.kobj, &data->attrs);
if (err)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return 0;
exit_remove:
sysfs_remove_group(&client->dev.kobj, &data->attrs);
exit_free:
kfree(data);
exit:
return err;
}
static int adt7462_remove(struct i2c_client *client)
{
struct adt7462_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &data->attrs);
kfree(data);
return 0;
}
module_i2c_driver(adt7462_driver);
MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
MODULE_DESCRIPTION("ADT7462 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
naitik2012/Xperia-L-Kernel | fs/hostfs/hostfs_user.c | 4940 | 7300 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include "hostfs.h"
#include "os.h"
#include <utime.h>
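/*
* Flatten the host's struct stat64 into the hostfs_stat structure shared
* with the kernel side. Only the whole-second st_atime/st_ctime/st_mtime
* values are copied, so the nanosecond fields are explicitly zeroed, and
* the device number is split with the os_major()/os_minor() helpers.
*/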
static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
{
p->ino = buf->st_ino;
p->mode = buf->st_mode;
p->nlink = buf->st_nlink;
p->uid = buf->st_uid;
p->gid = buf->st_gid;
p->size = buf->st_size;
p->atime.tv_sec = buf->st_atime;
p->atime.tv_nsec = 0;
p->ctime.tv_sec = buf->st_ctime;
p->ctime.tv_nsec = 0;
p->mtime.tv_sec = buf->st_mtime;
p->mtime.tv_nsec = 0;
p->blksize = buf->st_blksize;
p->blocks = buf->st_blocks;
p->maj = os_major(buf->st_rdev);
p->min = os_minor(buf->st_rdev);
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
struct stat64 buf;
if (fd >= 0) {
if (fstat64(fd, &buf) < 0)
return -errno;
} else if (lstat64(path, &buf) < 0) {
return -errno;
}
stat64_to_hostfs(&buf, p);
return 0;
}
int access_file(char *path, int r, int w, int x)
{
int mode = 0;
if (r)
mode = R_OK;
if (w)
mode |= W_OK;
if (x)
mode |= X_OK;
if (access(path, mode) != 0)
return -errno;
else return 0;
}
int open_file(char *path, int r, int w, int append)
{
int mode = 0, fd;
if (r && !w)
mode = O_RDONLY;
else if (!r && w)
mode = O_WRONLY;
else if (r && w)
mode = O_RDWR;
else panic("Impossible mode in open_file");
if (append)
mode |= O_APPEND;
fd = open64(path, mode);
if (fd < 0)
return -errno;
else return fd;
}
void *open_dir(char *path, int *err_out)
{
DIR *dir;
dir = opendir(path);
*err_out = errno;
return dir;
}
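/*
* The directory position is passed back and forth as an opaque cookie:
* the caller hands in the last value returned through *pos, read_dir()
* seeks there, reads a single entry, and stores the new telldir() cookie
* back into *pos so the next call resumes after the entry just returned.
*/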
char *read_dir(void *stream, unsigned long long *pos,
unsigned long long *ino_out, int *len_out,
unsigned int *type_out)
{
DIR *dir = stream;
struct dirent *ent;
seekdir(dir, *pos);
ent = readdir(dir);
if (ent == NULL)
return NULL;
*len_out = strlen(ent->d_name);
*ino_out = ent->d_ino;
*type_out = ent->d_type;
*pos = telldir(dir);
return ent->d_name;
}
int read_file(int fd, unsigned long long *offset, char *buf, int len)
{
int n;
n = pread64(fd, buf, len, *offset);
if (n < 0)
return -errno;
*offset += n;
return n;
}
int write_file(int fd, unsigned long long *offset, const char *buf, int len)
{
int n;
n = pwrite64(fd, buf, len, *offset);
if (n < 0)
return -errno;
*offset += n;
return n;
}
int lseek_file(int fd, long long offset, int whence)
{
int ret;
ret = lseek64(fd, offset, whence);
if (ret < 0)
return -errno;
return 0;
}
int fsync_file(int fd, int datasync)
{
int ret;
if (datasync)
ret = fdatasync(fd);
else
ret = fsync(fd);
if (ret < 0)
return -errno;
return 0;
}
int replace_file(int oldfd, int fd)
{
return dup2(oldfd, fd);
}
void close_file(void *stream)
{
close(*((int *) stream));
}
void close_dir(void *stream)
{
closedir(stream);
}
int file_create(char *name, int ur, int uw, int ux, int gr,
int gw, int gx, int or, int ow, int ox)
{
int mode, fd;
mode = 0;
mode |= ur ? S_IRUSR : 0;
mode |= uw ? S_IWUSR : 0;
mode |= ux ? S_IXUSR : 0;
mode |= gr ? S_IRGRP : 0;
mode |= gw ? S_IWGRP : 0;
mode |= gx ? S_IXGRP : 0;
mode |= or ? S_IROTH : 0;
mode |= ow ? S_IWOTH : 0;
mode |= ox ? S_IXOTH : 0;
fd = open64(name, O_CREAT | O_RDWR, mode);
if (fd < 0)
return -errno;
return fd;
}
int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
{
struct hostfs_stat st;
struct timeval times[2];
int err, ma;
if (attrs->ia_valid & HOSTFS_ATTR_MODE) {
if (fd >= 0) {
if (fchmod(fd, attrs->ia_mode) != 0)
return -errno;
} else if (chmod(file, attrs->ia_mode) != 0) {
return -errno;
}
}
if (attrs->ia_valid & HOSTFS_ATTR_UID) {
if (fd >= 0) {
if (fchown(fd, attrs->ia_uid, -1))
return -errno;
} else if (chown(file, attrs->ia_uid, -1)) {
return -errno;
}
}
if (attrs->ia_valid & HOSTFS_ATTR_GID) {
if (fd >= 0) {
if (fchown(fd, -1, attrs->ia_gid))
return -errno;
} else if (chown(file, -1, attrs->ia_gid)) {
return -errno;
}
}
if (attrs->ia_valid & HOSTFS_ATTR_SIZE) {
if (fd >= 0) {
if (ftruncate(fd, attrs->ia_size))
return -errno;
} else if (truncate(file, attrs->ia_size)) {
return -errno;
}
}
/*
* Update accessed and/or modified time, in two parts: first set
* times according to the changes to perform, and then call futimes()
* or utimes() to apply them.
*/
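/*
 * utimes()/futimes() always set both timestamps, so the current values
 * are fetched first and only the requested one(s) overwritten below;
 * the other timestamp is preserved unchanged.
 */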
ma = (HOSTFS_ATTR_ATIME_SET | HOSTFS_ATTR_MTIME_SET);
if (attrs->ia_valid & ma) {
err = stat_file(file, &st, fd);
if (err != 0)
return err;
times[0].tv_sec = st.atime.tv_sec;
times[0].tv_usec = st.atime.tv_nsec / 1000;
times[1].tv_sec = st.mtime.tv_sec;
times[1].tv_usec = st.mtime.tv_nsec / 1000;
if (attrs->ia_valid & HOSTFS_ATTR_ATIME_SET) {
times[0].tv_sec = attrs->ia_atime.tv_sec;
times[0].tv_usec = attrs->ia_atime.tv_nsec / 1000;
}
if (attrs->ia_valid & HOSTFS_ATTR_MTIME_SET) {
times[1].tv_sec = attrs->ia_mtime.tv_sec;
times[1].tv_usec = attrs->ia_mtime.tv_nsec / 1000;
}
if (fd >= 0) {
if (futimes(fd, times) != 0)
return -errno;
} else if (utimes(file, times) != 0) {
return -errno;
}
}
/* Note: ctime is not handled */
if (attrs->ia_valid & (HOSTFS_ATTR_ATIME | HOSTFS_ATTR_MTIME)) {
err = stat_file(file, &st, fd);
attrs->ia_atime = st.atime;
attrs->ia_mtime = st.mtime;
if (err != 0)
return err;
}
return 0;
}
int make_symlink(const char *from, const char *to)
{
int err;
err = symlink(to, from);
if (err)
return -errno;
return 0;
}
int unlink_file(const char *file)
{
int err;
err = unlink(file);
if (err)
return -errno;
return 0;
}
int do_mkdir(const char *file, int mode)
{
int err;
err = mkdir(file, mode);
if (err)
return -errno;
return 0;
}
int do_rmdir(const char *file)
{
int err;
err = rmdir(file);
if (err)
return -errno;
return 0;
}
int do_mknod(const char *file, int mode, unsigned int major, unsigned int minor)
{
int err;
err = mknod(file, mode, os_makedev(major, minor));
if (err)
return -errno;
return 0;
}
int link_file(const char *to, const char *from)
{
int err;
err = link(to, from);
if (err)
return -errno;
return 0;
}
int hostfs_do_readlink(char *file, char *buf, int size)
{
int n;
n = readlink(file, buf, size);
if (n < 0)
return -errno;
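/* readlink() does not NUL-terminate; add the terminator when it fits */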
if (n < size)
buf[n] = '\0';
return n;
}
int rename_file(char *from, char *to)
{
int err;
err = rename(from, to);
if (err < 0)
return -errno;
return 0;
}
int do_statfs(char *root, long *bsize_out, long long *blocks_out,
long long *bfree_out, long long *bavail_out,
long long *files_out, long long *ffree_out,
void *fsid_out, int fsid_size, long *namelen_out)
{
struct statfs64 buf;
int err;
err = statfs64(root, &buf);
if (err < 0)
return -errno;
*bsize_out = buf.f_bsize;
*blocks_out = buf.f_blocks;
*bfree_out = buf.f_bfree;
*bavail_out = buf.f_bavail;
*files_out = buf.f_files;
*ffree_out = buf.f_ffree;
memcpy(fsid_out, &buf.f_fsid,
sizeof(buf.f_fsid) > fsid_size ? fsid_size :
sizeof(buf.f_fsid));
*namelen_out = buf.f_namelen;
return 0;
}
| gpl-2.0 |
kaihua/Simple_kernel_jlo | drivers/hwmon/ad7418.c | 4940 | 7795 | /*
* An hwmon driver for the Analog Devices AD7416/17/18
* Copyright (C) 2006-07 Tower Technologies
*
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* Based on lm75.c
* Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License,
* as published by the Free Software Foundation - version 2.
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "lm75.h"
#define DRV_VERSION "0.4"
enum chips { ad7416, ad7417, ad7418 };
/* AD7418 registers */
#define AD7418_REG_TEMP_IN 0x00
#define AD7418_REG_CONF 0x01
#define AD7418_REG_TEMP_HYST 0x02
#define AD7418_REG_TEMP_OS 0x03
#define AD7418_REG_ADC 0x04
#define AD7418_REG_CONF2 0x05
#define AD7418_REG_ADC_CH(x) ((x) << 5)
#define AD7418_CH_TEMP AD7418_REG_ADC_CH(0)
static const u8 AD7418_REG_TEMP[] = { AD7418_REG_TEMP_IN,
AD7418_REG_TEMP_HYST,
AD7418_REG_TEMP_OS };
struct ad7418_data {
struct device *hwmon_dev;
struct attribute_group attrs;
enum chips type;
struct mutex lock;
int adc_max; /* number of ADC channels */
char valid;
unsigned long last_updated; /* In jiffies */
s16 temp[3]; /* Register values */
u16 in[4];
};
static int ad7418_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int ad7418_remove(struct i2c_client *client);
static const struct i2c_device_id ad7418_id[] = {
{ "ad7416", ad7416 },
{ "ad7417", ad7417 },
{ "ad7418", ad7418 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad7418_id);
static struct i2c_driver ad7418_driver = {
.driver = {
.name = "ad7418",
},
.probe = ad7418_probe,
.remove = ad7418_remove,
.id_table = ad7418_id,
};
static void ad7418_init_client(struct i2c_client *client)
{
struct ad7418_data *data = i2c_get_clientdata(client);
int reg = i2c_smbus_read_byte_data(client, AD7418_REG_CONF);
if (reg < 0) {
dev_err(&client->dev, "cannot read configuration register\n");
} else {
dev_info(&client->dev, "configuring for mode 1\n");
i2c_smbus_write_byte_data(client, AD7418_REG_CONF, reg & 0xfe);
if (data->type == ad7417 || data->type == ad7418)
i2c_smbus_write_byte_data(client,
AD7418_REG_CONF2, 0x00);
}
}
static struct ad7418_data *ad7418_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct ad7418_data *data = i2c_get_clientdata(client);
mutex_lock(&data->lock);
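/* cached readings are refreshed at most every 1.5 seconds (HZ + HZ / 2 jiffies) */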
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
u8 cfg;
int i, ch;
/* read config register and clear channel bits */
cfg = i2c_smbus_read_byte_data(client, AD7418_REG_CONF);
cfg &= 0x1F;
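/* bits 7:5 select the ADC channel (AD7418_REG_ADC_CH); select the temperature channel first */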
i2c_smbus_write_byte_data(client, AD7418_REG_CONF,
cfg | AD7418_CH_TEMP);
udelay(30);
for (i = 0; i < 3; i++) {
data->temp[i] =
i2c_smbus_read_word_swapped(client,
AD7418_REG_TEMP[i]);
}
for (i = 0, ch = 4; i < data->adc_max; i++, ch--) {
i2c_smbus_write_byte_data(client,
AD7418_REG_CONF,
cfg | AD7418_REG_ADC_CH(ch));
udelay(15);
data->in[data->adc_max - 1 - i] =
i2c_smbus_read_word_swapped(client,
AD7418_REG_ADC);
}
/* restore old configuration value */
i2c_smbus_write_word_swapped(client, AD7418_REG_CONF, cfg);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->lock);
return data;
}
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct ad7418_data *data = ad7418_update_device(dev);
return sprintf(buf, "%d\n",
LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
static ssize_t show_adc(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct ad7418_data *data = ad7418_update_device(dev);
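/*
 * The conversion result is left-justified in the 16-bit word (10
 * significant bits); shift it down and scale to millivolts against the
 * 2500 mV full scale, with +512 rounding the divide by 1024 to nearest.
 */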
return sprintf(buf, "%d\n",
((data->in[attr->index] >> 6) * 2500 + 512) / 1024);
}
static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct ad7418_data *data = i2c_get_clientdata(client);
long temp;
int ret = kstrtol(buf, 10, &temp);
if (ret < 0)
return ret;
mutex_lock(&data->lock);
data->temp[attr->index] = LM75_TEMP_TO_REG(temp);
i2c_smbus_write_word_swapped(client,
AD7418_REG_TEMP[attr->index],
data->temp[attr->index]);
mutex_unlock(&data->lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
show_temp, set_temp, 1);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
show_temp, set_temp, 2);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_adc, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 3);
static struct attribute *ad7416_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL
};
static struct attribute *ad7417_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
NULL
};
static struct attribute *ad7418_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
NULL
};
static int ad7418_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
struct ad7418_data *data;
int err;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA)) {
err = -EOPNOTSUPP;
goto exit;
}
data = kzalloc(sizeof(struct ad7418_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
data->type = id->driver_data;
switch (data->type) {
case ad7416:
data->adc_max = 0;
data->attrs.attrs = ad7416_attributes;
break;
case ad7417:
data->adc_max = 4;
data->attrs.attrs = ad7417_attributes;
break;
case ad7418:
data->adc_max = 1;
data->attrs.attrs = ad7418_attributes;
break;
}
dev_info(&client->dev, "%s chip found\n", client->name);
/* Initialize the AD7418 chip */
ad7418_init_client(client);
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &data->attrs);
if (err)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return 0;
exit_remove:
sysfs_remove_group(&client->dev.kobj, &data->attrs);
exit_free:
kfree(data);
exit:
return err;
}
static int ad7418_remove(struct i2c_client *client)
{
struct ad7418_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &data->attrs);
kfree(data);
return 0;
}
module_i2c_driver(ad7418_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("AD7416/17/18 driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
faust93/franco_geehrc_f93 | drivers/sh/intc/chip.c | 4940 | 5479 | /*
* IRQ chip definitions for INTC IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
* Copyright (C) 2009 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpumask.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include "internals.h"
void _intc_enable(struct irq_data *data, unsigned long handle)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long addr;
unsigned int cpu;
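/*
 * Registers may have one instance per CPU (SMP_NR copies); program each
 * copy, but on SMP only for CPUs present in the IRQ's affinity mask.
 */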
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, data->affinity))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
[_INTC_FN(handle)], irq);
}
intc_balancing_enable(irq);
}
static void intc_enable(struct irq_data *data)
{
_intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data));
}
static void intc_disable(struct irq_data *data)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);
unsigned long addr;
unsigned int cpu;
intc_balancing_disable(irq);
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, data->affinity))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
[_INTC_FN(handle)], irq);
}
}
#ifdef CONFIG_SMP
/*
* This is held with the irq desc lock held, so we don't require any
* additional locking here at the intc desc level. The affinity mask is
* later tested in the enable/disable paths.
*/
static int intc_set_affinity(struct irq_data *data,
const struct cpumask *cpumask,
bool force)
{
if (!cpumask_intersects(cpumask, cpu_online_mask))
return -1;
cpumask_copy(data->affinity, cpumask);
return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
static void intc_mask_ack(struct irq_data *data)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = intc_get_ack_handle(irq);
unsigned long addr;
intc_disable(data);
/* read register and write zero only to the associated bit */
if (handle) {
unsigned int value;
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
value = intc_set_field_from_handle(0, 1, handle);
switch (_INTC_FN(handle)) {
case REG_FN_MODIFY_BASE + 0: /* 8bit */
__raw_readb(addr);
__raw_writeb(0xff ^ value, addr);
break;
case REG_FN_MODIFY_BASE + 1: /* 16bit */
__raw_readw(addr);
__raw_writew(0xffff ^ value, addr);
break;
case REG_FN_MODIFY_BASE + 3: /* 32bit */
__raw_readl(addr);
__raw_writel(0xffffffff ^ value, addr);
break;
default:
BUG();
break;
}
}
}
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
unsigned int nr_hp,
unsigned int irq)
{
struct intc_handle_int key;
key.irq = irq;
key.handle = 0;
return bsearch(&key, hp, nr_hp, sizeof(*hp), intc_handle_int_cmp);
}
int intc_set_priority(unsigned int irq, unsigned int prio)
{
struct intc_desc_int *d = get_intc_desc(irq);
struct irq_data *data = irq_get_irq_data(irq);
struct intc_handle_int *ihp;
if (!intc_get_prio_level(irq) || prio <= 1)
return -EINVAL;
ihp = intc_find_irq(d->prio, d->nr_prio, irq);
if (ihp) {
if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
return -EINVAL;
intc_set_prio_level(irq, prio);
/*
* only set secondary masking method directly
* primary masking method is using intc_prio_level[irq]
* priority level will be set during next enable()
*/
if (_INTC_FN(ihp->handle) != REG_FN_ERR)
_intc_enable(data, ihp->handle);
}
return 0;
}
#define SENSE_VALID_FLAG 0x80
#define VALID(x) (x | SENSE_VALID_FLAG)
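/*
 * Bit 7 marks a table entry as populated, so a stored sense value of 0
 * can still be distinguished from an unsupported IRQ type (entry == 0).
 */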
static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
[IRQ_TYPE_EDGE_FALLING] = VALID(0),
[IRQ_TYPE_EDGE_RISING] = VALID(1),
[IRQ_TYPE_LEVEL_LOW] = VALID(2),
/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
!defined(CONFIG_CPU_SUBTYPE_SH7707) && \
!defined(CONFIG_CPU_SUBTYPE_SH7709)
[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
#if defined(CONFIG_ARM) /* all recent SH-Mobile / R-Mobile ARM support this */
[IRQ_TYPE_EDGE_BOTH] = VALID(4),
#endif
};
static int intc_set_type(struct irq_data *data, unsigned int type)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
struct intc_handle_int *ihp;
unsigned long addr;
if (!value)
return -EINVAL;
value &= ~SENSE_VALID_FLAG;
ihp = intc_find_irq(d->sense, d->nr_sense, irq);
if (ihp) {
/* PINT has 2-bit sense registers, should fail on EDGE_BOTH */
if (value >= (1 << _INTC_WIDTH(ihp->handle)))
return -EINVAL;
addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
}
return 0;
}
struct irq_chip intc_irq_chip = {
.irq_mask = intc_disable,
.irq_unmask = intc_enable,
.irq_mask_ack = intc_mask_ack,
.irq_enable = intc_enable,
.irq_disable = intc_disable,
.irq_set_type = intc_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = intc_set_affinity,
#endif
.flags = IRQCHIP_SKIP_SET_WAKE,
};
| gpl-2.0 |
Nico60/kernel_lge_hammerhead | drivers/media/video/tcm825x.c | 5196 | 22018 | /*
* drivers/media/video/tcm825x.c
*
* TCM825X camera sensor driver.
*
* Copyright (C) 2007 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@nokia.com>
*
* Based on code from David Cohen <david.cohen@indt.org.br>
*
* This driver was based on ov9640 sensor driver from MontaVista
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/v4l2-int-device.h>
#include "tcm825x.h"
/*
* The sensor has two fps modes: the lower one just gives half the fps
* at the same xclk than the high one.
*/
#define MAX_FPS 30
#define MIN_FPS 8
#define MAX_HALF_FPS (MAX_FPS / 2)
#define HIGH_FPS_MODE_LOWER_LIMIT 14
#define DEFAULT_FPS MAX_HALF_FPS
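/*
 * DEFAULT_FPS is thus 15 fps; targets of HIGH_FPS_MODE_LOWER_LIMIT (14)
 * fps or less are served with the sensor's half-rate mode, see
 * tcm825x_configure() and ioctl_g_ifparm().
 */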
struct tcm825x_sensor {
const struct tcm825x_platform_data *platform_data;
struct v4l2_int_device *v4l2_int_device;
struct i2c_client *i2c_client;
struct v4l2_pix_format pix;
struct v4l2_fract timeperframe;
};
/* list of image formats supported by TCM825X sensor */
static const struct v4l2_fmtdesc tcm825x_formats[] = {
{
.description = "YUYV (YUV 4:2:2), packed",
.pixelformat = V4L2_PIX_FMT_UYVY,
}, {
/* Note: V4L2 defines RGB565 as:
*
* Byte 0 Byte 1
* g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
*
* We interpret RGB565 as:
*
* Byte 0 Byte 1
* g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
*/
.description = "RGB565, le",
.pixelformat = V4L2_PIX_FMT_RGB565,
},
};
#define TCM825X_NUM_CAPTURE_FORMATS ARRAY_SIZE(tcm825x_formats)
/*
* TCM825X register configuration for all combinations of pixel format and
* image size
*/
static const struct tcm825x_reg subqcif = { 0x20, TCM825X_PICSIZ };
static const struct tcm825x_reg qcif = { 0x18, TCM825X_PICSIZ };
static const struct tcm825x_reg cif = { 0x14, TCM825X_PICSIZ };
static const struct tcm825x_reg qqvga = { 0x0c, TCM825X_PICSIZ };
static const struct tcm825x_reg qvga = { 0x04, TCM825X_PICSIZ };
static const struct tcm825x_reg vga = { 0x00, TCM825X_PICSIZ };
static const struct tcm825x_reg yuv422 = { 0x00, TCM825X_PICFMT };
static const struct tcm825x_reg rgb565 = { 0x02, TCM825X_PICFMT };
/* Our own specific controls */
#define V4L2_CID_ALC V4L2_CID_PRIVATE_BASE
#define V4L2_CID_H_EDGE_EN V4L2_CID_PRIVATE_BASE + 1
#define V4L2_CID_V_EDGE_EN V4L2_CID_PRIVATE_BASE + 2
#define V4L2_CID_LENS V4L2_CID_PRIVATE_BASE + 3
#define V4L2_CID_MAX_EXPOSURE_TIME V4L2_CID_PRIVATE_BASE + 4
#define V4L2_CID_LAST_PRIV V4L2_CID_MAX_EXPOSURE_TIME
/* Video controls */
static struct vcontrol {
struct v4l2_queryctrl qc;
u16 reg;
u16 start_bit;
} video_control[] = {
{
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = 0,
.maximum = 63,
.step = 1,
},
.reg = TCM825X_AG,
.start_bit = 0,
},
{
{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Balance",
.minimum = 0,
.maximum = 255,
.step = 1,
},
.reg = TCM825X_MRG,
.start_bit = 0,
},
{
{
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Blue Balance",
.minimum = 0,
.maximum = 255,
.step = 1,
},
.reg = TCM825X_MBG,
.start_bit = 0,
},
{
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Auto White Balance",
.minimum = 0,
.maximum = 1,
.step = 0,
},
.reg = TCM825X_AWBSW,
.start_bit = 7,
},
{
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Exposure Time",
.minimum = 0,
.maximum = 0x1fff,
.step = 1,
},
.reg = TCM825X_ESRSPD_U,
.start_bit = 0,
},
{
{
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Mirror Image",
.minimum = 0,
.maximum = 1,
.step = 0,
},
.reg = TCM825X_H_INV,
.start_bit = 6,
},
{
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Vertical Flip",
.minimum = 0,
.maximum = 1,
.step = 0,
},
.reg = TCM825X_V_INV,
.start_bit = 7,
},
/* Private controls */
{
{
.id = V4L2_CID_ALC,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Auto Luminance Control",
.minimum = 0,
.maximum = 1,
.step = 0,
},
.reg = TCM825X_ALCSW,
.start_bit = 7,
},
{
{
.id = V4L2_CID_H_EDGE_EN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Horizontal Edge Enhancement",
.minimum = 0,
.maximum = 0xff,
.step = 1,
},
.reg = TCM825X_HDTG,
.start_bit = 0,
},
{
{
.id = V4L2_CID_V_EDGE_EN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Vertical Edge Enhancement",
.minimum = 0,
.maximum = 0xff,
.step = 1,
},
.reg = TCM825X_VDTG,
.start_bit = 0,
},
{
{
.id = V4L2_CID_LENS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Lens Shading Compensation",
.minimum = 0,
.maximum = 0x3f,
.step = 1,
},
.reg = TCM825X_LENS,
.start_bit = 0,
},
{
{
.id = V4L2_CID_MAX_EXPOSURE_TIME,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Maximum Exposure Time",
.minimum = 0,
.maximum = 0x3,
.step = 1,
},
.reg = TCM825X_ESRLIM,
.start_bit = 5,
},
};
static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] =
{ &subqcif, &qqvga, &qcif, &qvga, &cif, &vga };
static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] =
{ &yuv422, &rgb565 };
/*
* Read a value from a register in a TCM825X sensor device.
* Returns the register value (a non-negative byte) on success,
* or a negative error code otherwise.
*/
static int tcm825x_read_reg(struct i2c_client *client, int reg)
{
int err;
struct i2c_msg msg[2];
u8 reg_buf, data_buf = 0;
if (!client->adapter)
return -ENODEV;
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].len = 1;
msg[0].buf = &reg_buf;
msg[1].addr = client->addr;
msg[1].flags = I2C_M_RD;
msg[1].len = 1;
msg[1].buf = &data_buf;
reg_buf = reg;
err = i2c_transfer(client->adapter, msg, 2);
if (err < 0)
return err;
return data_buf;
}
/*
* Write a value to a register in a TCM825X sensor device.
* Returns zero if successful, or non-zero otherwise.
*/
static int tcm825x_write_reg(struct i2c_client *client, u8 reg, u8 val)
{
int err;
struct i2c_msg msg[1];
unsigned char data[2];
if (!client->adapter)
return -ENODEV;
msg->addr = client->addr;
msg->flags = 0;
msg->len = 2;
msg->buf = data;
data[0] = reg;
data[1] = val;
err = i2c_transfer(client->adapter, msg, 1);
if (err >= 0)
return 0;
return err;
}
static int __tcm825x_write_reg_mask(struct i2c_client *client,
u8 reg, u8 val, u8 mask)
{
int rc;
/* need to do read - modify - write */
rc = tcm825x_read_reg(client, reg);
if (rc < 0)
return rc;
rc &= (~mask); /* Clear the masked bits */
val &= mask; /* Enforce mask on value */
val |= rc;
/* write the new value to the register */
rc = tcm825x_write_reg(client, reg, val);
if (rc)
return rc;
return 0;
}
#define tcm825x_write_reg_mask(client, regmask, val) \
__tcm825x_write_reg_mask(client, TCM825X_ADDR((regmask)), val, \
TCM825X_MASK((regmask)))
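/*
 * The 'regmask' constants are assumed to pack a register address and a
 * bit mask into one value in tcm825x.h; TCM825X_ADDR() and TCM825X_MASK()
 * unpack them for the read-modify-write helper above.
 */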
/*
* Initialize a list of TCM825X registers.
* The list of registers is terminated by the pair of values
* { TCM825X_REG_TERM, TCM825X_VAL_TERM }.
* Returns zero if successful, or non-zero otherwise.
*/
static int tcm825x_write_default_regs(struct i2c_client *client,
const struct tcm825x_reg *reglist)
{
int err;
const struct tcm825x_reg *next = reglist;
while (!((next->reg == TCM825X_REG_TERM)
&& (next->val == TCM825X_VAL_TERM))) {
err = tcm825x_write_reg(client, next->reg, next->val);
if (err) {
dev_err(&client->dev, "register writing failed\n");
return err;
}
next++;
}
return 0;
}
static struct vcontrol *find_vctrl(int id)
{
int i;
if (id < V4L2_CID_BASE)
return NULL;
for (i = 0; i < ARRAY_SIZE(video_control); i++)
if (video_control[i].qc.id == id)
return &video_control[i];
return NULL;
}
/*
* Find the best match for a requested image capture size. The best match
* is chosen as the nearest match that has the same number or fewer pixels
* as the requested size, or the smallest image size if the requested size
* has fewer pixels than the smallest image.
*/
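/*
 * For illustration (assuming the usual sizes, e.g. QCIF 176x144 and
 * QVGA 320x240): a 320x200 request has fewer pixels than QVGA, so the
 * loop below stops at QCIF, the largest size not exceeding the request.
 */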
static enum image_size tcm825x_find_size(struct v4l2_int_device *s,
unsigned int width,
unsigned int height)
{
enum image_size isize;
unsigned long pixels = width * height;
struct tcm825x_sensor *sensor = s->priv;
for (isize = subQCIF; isize < VGA; isize++) {
if (tcm825x_sizes[isize + 1].height
* tcm825x_sizes[isize + 1].width > pixels) {
dev_dbg(&sensor->i2c_client->dev, "size %d\n", isize);
return isize;
}
}
dev_dbg(&sensor->i2c_client->dev, "format default VGA\n");
return VGA;
}
/*
* Configure the TCM825X for current image size, pixel format, and
* frame period. fper is the frame period (in seconds) expressed as a
* fraction. Returns zero if successful, or non-zero otherwise. The
* actual frame period is returned in fper.
*/
static int tcm825x_configure(struct v4l2_int_device *s)
{
struct tcm825x_sensor *sensor = s->priv;
struct v4l2_pix_format *pix = &sensor->pix;
enum image_size isize = tcm825x_find_size(s, pix->width, pix->height);
struct v4l2_fract *fper = &sensor->timeperframe;
enum pixel_format pfmt;
int err;
u32 tgt_fps;
u8 val;
/* common register initialization */
err = tcm825x_write_default_regs(
sensor->i2c_client, sensor->platform_data->default_regs());
if (err)
return err;
/* configure image size */
val = tcm825x_siz_reg[isize]->val;
dev_dbg(&sensor->i2c_client->dev,
"configuring image size %d\n", isize);
err = tcm825x_write_reg_mask(sensor->i2c_client,
tcm825x_siz_reg[isize]->reg, val);
if (err)
return err;
/* configure pixel format */
switch (pix->pixelformat) {
default:
case V4L2_PIX_FMT_RGB565:
pfmt = RGB565;
break;
case V4L2_PIX_FMT_UYVY:
pfmt = YUV422;
break;
}
dev_dbg(&sensor->i2c_client->dev,
"configuring pixel format %d\n", pfmt);
val = tcm825x_fmt_reg[pfmt]->val;
err = tcm825x_write_reg_mask(sensor->i2c_client,
tcm825x_fmt_reg[pfmt]->reg, val);
if (err)
return err;
/*
* For frame rate < 15, the FPS reg (addr 0x02, bit 7) must be
* set. Frame rate will be halved from the normal.
*/
tgt_fps = fper->denominator / fper->numerator;
if (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) {
val = tcm825x_read_reg(sensor->i2c_client, 0x02);
val |= 0x80;
tcm825x_write_reg(sensor->i2c_client, 0x02, val);
}
return 0;
}
static int ioctl_queryctrl(struct v4l2_int_device *s,
struct v4l2_queryctrl *qc)
{
struct vcontrol *control;
control = find_vctrl(qc->id);
if (control == NULL)
return -EINVAL;
*qc = control->qc;
return 0;
}
static int ioctl_g_ctrl(struct v4l2_int_device *s,
struct v4l2_control *vc)
{
struct tcm825x_sensor *sensor = s->priv;
struct i2c_client *client = sensor->i2c_client;
int val, r;
struct vcontrol *lvc;
/* exposure time is special, spread across 2 registers */
if (vc->id == V4L2_CID_EXPOSURE) {
int val_lower, val_upper;
val_upper = tcm825x_read_reg(client,
TCM825X_ADDR(TCM825X_ESRSPD_U));
if (val_upper < 0)
return val_upper;
val_lower = tcm825x_read_reg(client,
TCM825X_ADDR(TCM825X_ESRSPD_L));
if (val_lower < 0)
return val_lower;
vc->value = ((val_upper & 0x1f) << 8) | (val_lower);
return 0;
}
lvc = find_vctrl(vc->id);
if (lvc == NULL)
return -EINVAL;
r = tcm825x_read_reg(client, TCM825X_ADDR(lvc->reg));
if (r < 0)
return r;
val = r & TCM825X_MASK(lvc->reg);
val >>= lvc->start_bit;
if (val < 0)
return val;
if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
val ^= sensor->platform_data->is_upside_down();
vc->value = val;
return 0;
}
static int ioctl_s_ctrl(struct v4l2_int_device *s,
struct v4l2_control *vc)
{
struct tcm825x_sensor *sensor = s->priv;
struct i2c_client *client = sensor->i2c_client;
struct vcontrol *lvc;
int val = vc->value;
/* exposure time is special, spread across 2 registers */
if (vc->id == V4L2_CID_EXPOSURE) {
int val_lower, val_upper;
val_lower = val & TCM825X_MASK(TCM825X_ESRSPD_L);
val_upper = (val >> 8) & TCM825X_MASK(TCM825X_ESRSPD_U);
if (tcm825x_write_reg_mask(client,
TCM825X_ESRSPD_U, val_upper))
return -EIO;
if (tcm825x_write_reg_mask(client,
TCM825X_ESRSPD_L, val_lower))
return -EIO;
return 0;
}
lvc = find_vctrl(vc->id);
if (lvc == NULL)
return -EINVAL;
if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
val ^= sensor->platform_data->is_upside_down();
val = val << lvc->start_bit;
if (tcm825x_write_reg_mask(client, lvc->reg, val))
return -EIO;
return 0;
}
static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
struct v4l2_fmtdesc *fmt)
{
int index = fmt->index;
switch (fmt->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (index >= TCM825X_NUM_CAPTURE_FORMATS)
return -EINVAL;
break;
default:
return -EINVAL;
}
fmt->flags = tcm825x_formats[index].flags;
strlcpy(fmt->description, tcm825x_formats[index].description,
sizeof(fmt->description));
fmt->pixelformat = tcm825x_formats[index].pixelformat;
return 0;
}
static int ioctl_try_fmt_cap(struct v4l2_int_device *s,
struct v4l2_format *f)
{
struct tcm825x_sensor *sensor = s->priv;
enum image_size isize;
int ifmt;
struct v4l2_pix_format *pix = &f->fmt.pix;
isize = tcm825x_find_size(s, pix->width, pix->height);
dev_dbg(&sensor->i2c_client->dev, "isize = %d num_capture = %lu\n",
isize, (unsigned long)TCM825X_NUM_CAPTURE_FORMATS);
pix->width = tcm825x_sizes[isize].width;
pix->height = tcm825x_sizes[isize].height;
for (ifmt = 0; ifmt < TCM825X_NUM_CAPTURE_FORMATS; ifmt++)
if (pix->pixelformat == tcm825x_formats[ifmt].pixelformat)
break;
if (ifmt == TCM825X_NUM_CAPTURE_FORMATS)
ifmt = 0; /* Default = YUV 4:2:2 */
pix->pixelformat = tcm825x_formats[ifmt].pixelformat;
pix->field = V4L2_FIELD_NONE;
pix->bytesperline = pix->width * TCM825X_BYTES_PER_PIXEL;
pix->sizeimage = pix->bytesperline * pix->height;
pix->priv = 0;
dev_dbg(&sensor->i2c_client->dev, "format = 0x%08x\n",
pix->pixelformat);
switch (pix->pixelformat) {
case V4L2_PIX_FMT_UYVY:
default:
pix->colorspace = V4L2_COLORSPACE_JPEG;
break;
case V4L2_PIX_FMT_RGB565:
pix->colorspace = V4L2_COLORSPACE_SRGB;
break;
}
return 0;
}
static int ioctl_s_fmt_cap(struct v4l2_int_device *s,
struct v4l2_format *f)
{
struct tcm825x_sensor *sensor = s->priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
int rval;
rval = ioctl_try_fmt_cap(s, f);
if (rval)
return rval;
rval = tcm825x_configure(s);
sensor->pix = *pix;
return rval;
}
static int ioctl_g_fmt_cap(struct v4l2_int_device *s,
struct v4l2_format *f)
{
struct tcm825x_sensor *sensor = s->priv;
f->fmt.pix = sensor->pix;
return 0;
}
static int ioctl_g_parm(struct v4l2_int_device *s,
struct v4l2_streamparm *a)
{
struct tcm825x_sensor *sensor = s->priv;
struct v4l2_captureparm *cparm = &a->parm.capture;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
memset(a, 0, sizeof(*a));
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
cparm->capability = V4L2_CAP_TIMEPERFRAME;
cparm->timeperframe = sensor->timeperframe;
return 0;
}
static int ioctl_s_parm(struct v4l2_int_device *s,
struct v4l2_streamparm *a)
{
struct tcm825x_sensor *sensor = s->priv;
struct v4l2_fract *timeperframe = &a->parm.capture.timeperframe;
u32 tgt_fps; /* target frames per second */
int rval;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if ((timeperframe->numerator == 0)
|| (timeperframe->denominator == 0)) {
timeperframe->denominator = DEFAULT_FPS;
timeperframe->numerator = 1;
}
tgt_fps = timeperframe->denominator / timeperframe->numerator;
if (tgt_fps > MAX_FPS) {
timeperframe->denominator = MAX_FPS;
timeperframe->numerator = 1;
} else if (tgt_fps < MIN_FPS) {
timeperframe->denominator = MIN_FPS;
timeperframe->numerator = 1;
}
sensor->timeperframe = *timeperframe;
rval = tcm825x_configure(s);
return rval;
}
static int ioctl_s_power(struct v4l2_int_device *s, int on)
{
struct tcm825x_sensor *sensor = s->priv;
return sensor->platform_data->power_set(on);
}
/*
* Given the image capture format in pix, the nominal frame period in
* timeperframe, calculate the required xclk frequency.
*
* TCM825X input frequency characteristics are:
* Minimum 11.9 MHz, Typical 24.57 MHz and maximum 25/27 MHz
*/
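/*
 * For illustration: a 30 fps target gives 2457 * 30 / 30 * 10000 =
 * 24.57 MHz, while a 14 fps target (half-rate mode) gives roughly
 * 22.93 MHz, both before clamping to TCM825X_XCLK_MIN/MAX.
 */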
static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
{
struct tcm825x_sensor *sensor = s->priv;
struct v4l2_fract *timeperframe = &sensor->timeperframe;
u32 tgt_xclk; /* target xclk */
u32 tgt_fps; /* target frames per second */
int rval;
rval = sensor->platform_data->ifparm(p);
if (rval)
return rval;
tgt_fps = timeperframe->denominator / timeperframe->numerator;
tgt_xclk = (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) ?
(2457 * tgt_fps) / MAX_HALF_FPS :
(2457 * tgt_fps) / MAX_FPS;
tgt_xclk *= 10000;
tgt_xclk = min(tgt_xclk, (u32)TCM825X_XCLK_MAX);
tgt_xclk = max(tgt_xclk, (u32)TCM825X_XCLK_MIN);
p->u.bt656.clock_curr = tgt_xclk;
return 0;
}
static int ioctl_g_needs_reset(struct v4l2_int_device *s, void *buf)
{
struct tcm825x_sensor *sensor = s->priv;
return sensor->platform_data->needs_reset(s, buf, &sensor->pix);
}
static int ioctl_reset(struct v4l2_int_device *s)
{
return -EBUSY;
}
static int ioctl_init(struct v4l2_int_device *s)
{
return tcm825x_configure(s);
}
static int ioctl_dev_exit(struct v4l2_int_device *s)
{
return 0;
}
static int ioctl_dev_init(struct v4l2_int_device *s)
{
struct tcm825x_sensor *sensor = s->priv;
int r;
r = tcm825x_read_reg(sensor->i2c_client, 0x01);
if (r < 0)
return r;
if (r == 0) {
dev_err(&sensor->i2c_client->dev, "device not detected\n");
return -EIO;
}
return 0;
}
static struct v4l2_int_ioctl_desc tcm825x_ioctl_desc[] = {
{ vidioc_int_dev_init_num,
(v4l2_int_ioctl_func *)ioctl_dev_init },
{ vidioc_int_dev_exit_num,
(v4l2_int_ioctl_func *)ioctl_dev_exit },
{ vidioc_int_s_power_num,
(v4l2_int_ioctl_func *)ioctl_s_power },
{ vidioc_int_g_ifparm_num,
(v4l2_int_ioctl_func *)ioctl_g_ifparm },
{ vidioc_int_g_needs_reset_num,
(v4l2_int_ioctl_func *)ioctl_g_needs_reset },
{ vidioc_int_reset_num,
(v4l2_int_ioctl_func *)ioctl_reset },
{ vidioc_int_init_num,
(v4l2_int_ioctl_func *)ioctl_init },
{ vidioc_int_enum_fmt_cap_num,
(v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
{ vidioc_int_try_fmt_cap_num,
(v4l2_int_ioctl_func *)ioctl_try_fmt_cap },
{ vidioc_int_g_fmt_cap_num,
(v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
{ vidioc_int_s_fmt_cap_num,
(v4l2_int_ioctl_func *)ioctl_s_fmt_cap },
{ vidioc_int_g_parm_num,
(v4l2_int_ioctl_func *)ioctl_g_parm },
{ vidioc_int_s_parm_num,
(v4l2_int_ioctl_func *)ioctl_s_parm },
{ vidioc_int_queryctrl_num,
(v4l2_int_ioctl_func *)ioctl_queryctrl },
{ vidioc_int_g_ctrl_num,
(v4l2_int_ioctl_func *)ioctl_g_ctrl },
{ vidioc_int_s_ctrl_num,
(v4l2_int_ioctl_func *)ioctl_s_ctrl },
};
static struct v4l2_int_slave tcm825x_slave = {
.ioctls = tcm825x_ioctl_desc,
.num_ioctls = ARRAY_SIZE(tcm825x_ioctl_desc),
};
static struct tcm825x_sensor tcm825x;
static struct v4l2_int_device tcm825x_int_device = {
.module = THIS_MODULE,
.name = TCM825X_NAME,
.priv = &tcm825x,
.type = v4l2_int_type_slave,
.u = {
.slave = &tcm825x_slave,
},
};
static int tcm825x_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct tcm825x_sensor *sensor = &tcm825x;
if (i2c_get_clientdata(client))
return -EBUSY;
sensor->platform_data = client->dev.platform_data;
if (sensor->platform_data == NULL
|| !sensor->platform_data->is_okay())
return -ENODEV;
sensor->v4l2_int_device = &tcm825x_int_device;
sensor->i2c_client = client;
i2c_set_clientdata(client, sensor);
/* Make the default capture format QVGA RGB565 */
sensor->pix.width = tcm825x_sizes[QVGA].width;
sensor->pix.height = tcm825x_sizes[QVGA].height;
sensor->pix.pixelformat = V4L2_PIX_FMT_RGB565;
return v4l2_int_device_register(sensor->v4l2_int_device);
}
static int tcm825x_remove(struct i2c_client *client)
{
struct tcm825x_sensor *sensor = i2c_get_clientdata(client);
if (!client->adapter)
return -ENODEV; /* our client isn't attached */
v4l2_int_device_unregister(sensor->v4l2_int_device);
return 0;
}
static const struct i2c_device_id tcm825x_id[] = {
{ "tcm825x", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tcm825x_id);
static struct i2c_driver tcm825x_i2c_driver = {
.driver = {
.name = TCM825X_NAME,
},
.probe = tcm825x_probe,
.remove = tcm825x_remove,
.id_table = tcm825x_id,
};
static struct tcm825x_sensor tcm825x = {
.timeperframe = {
.numerator = 1,
.denominator = DEFAULT_FPS,
},
};
static int __init tcm825x_init(void)
{
int rval;
rval = i2c_add_driver(&tcm825x_i2c_driver);
if (rval)
printk(KERN_INFO "%s: failed registering " TCM825X_NAME "\n",
__func__);
return rval;
}
static void __exit tcm825x_exit(void)
{
i2c_del_driver(&tcm825x_i2c_driver);
}
/*
* FIXME: Menelaus isn't ready (?) at module_init stage, so use
* late_initcall for now.
*/
late_initcall(tcm825x_init);
module_exit(tcm825x_exit);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("TCM825x camera sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
talnoah/m7-sense | drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c | 8012 | 59439 | /**
@verbatim
Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
ADDI-DATA GmbH
Dieselstrasse 3
D-77833 Ottersweier
Tel: +49(0)7223/9493-0
Fax: +49(0)7223/9493-92
http://www.addi-data.com
info@addi-data.com
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should also find the complete GPL in the COPYING file accompanying this source code.
@endverbatim
*/
/*
+-----------------------------------------------------------------------+
| (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier |
+-----------------------------------------------------------------------+
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-----------------------------------------------------------------------+
| Project : APCI-3XXX | Compiler : GCC |
| Module name : hwdrv_apci3xxx.c| Version : 2.96 |
+-------------------------------+---------------------------------------+
| Project manager: S. Weber | Date : 15/09/2005 |
+-----------------------------------------------------------------------+
| Description :APCI3XXX Module. Hardware abstraction Layer for APCI3XXX|
+-----------------------------------------------------------------------+
| UPDATE'S |
+-----------------------------------------------------------------------+
| Date | Author | Description of updates |
+----------+-----------+------------------------------------------------+
| | | |
| | | |
+----------+-----------+------------------------------------------------+
*/
#include "hwdrv_apci3xxx.h"
/*
+----------------------------------------------------------------------------+
| ANALOG INPUT FUNCTIONS |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_TestConversionStarted |
| (struct comedi_device *dev) |
+----------------------------------------------------------------------------+
| Task Test if any conversion started |
+----------------------------------------------------------------------------+
| Input Parameters : - |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0 : Conversion not started |
| 1 : Conversion started |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_TestConversionStarted(struct comedi_device *dev)
{
if ((readl(devpriv->dw_AiBase + 8) & 0x80000UL) == 0x80000UL)
return 1;
else
return 0;
}
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_AnalogInputConfigOperatingMode |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task Converting mode and convert time selection |
+----------------------------------------------------------------------------+
| Input Parameters : b_SingleDiff = (unsigned char) data[1]; |
| b_TimeBase = (unsigned char) data[2]; (0: ns, 1:micros 2:ms)|
| dw_ReloadValue = (unsigned int) data[3]; |
| ........ |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :>0 : No error |
| -1 : Single/Diff selection error |
| -2 : Convert time base unity selection error |
| -3 : Convert time value selection error |
| -10: Any conversion started |
| .... |
| -100 : Config command error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_AnalogInputConfigOperatingMode(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_TimeBase = 0;
unsigned char b_SingleDiff = 0;
unsigned int dw_ReloadValue = 0;
unsigned int dw_TestReloadValue = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n == 4) {
/****************************/
/* Get the Single/Diff flag */
/****************************/
b_SingleDiff = (unsigned char) data[1];
/****************************/
/* Get the time base unit */
/****************************/
b_TimeBase = (unsigned char) data[2];
/*************************************/
/* Get the convert time reload value */
/*************************************/
dw_ReloadValue = (unsigned int) data[3];
/**********************/
/* Test the time base */
/**********************/
if ((this_board->b_AvailableConvertUnit & (1 << b_TimeBase)) !=
0) {
/*******************************/
/* Test the convert time value */
/*******************************/
if (dw_ReloadValue <= 65535) {
dw_TestReloadValue = dw_ReloadValue;
if (b_TimeBase == 1) {
dw_TestReloadValue =
dw_TestReloadValue * 1000UL;
}
if (b_TimeBase == 2) {
dw_TestReloadValue =
dw_TestReloadValue * 1000000UL;
}
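/************************************************************/
/* dw_TestReloadValue is now expressed in nanoseconds, */
/* whichever time base (ns / us / ms) was selected above */
/************************************************************/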
/*******************************/
/* Test the convert time value */
/*******************************/
if (dw_TestReloadValue >=
devpriv->s_EeParameters.
ui_MinAcquisitiontimeNs) {
if ((b_SingleDiff == APCI3XXX_SINGLE)
|| (b_SingleDiff ==
APCI3XXX_DIFF)) {
if (((b_SingleDiff == APCI3XXX_SINGLE)
&& (devpriv->s_EeParameters.i_NbrAiChannel == 0))
|| ((b_SingleDiff == APCI3XXX_DIFF)
&& (this_board->i_NbrAiChannelDiff == 0))
) {
/*******************************/
/* Single/Diff selection error */
/*******************************/
printk("Single/Diff selection error\n");
i_ReturnValue = -1;
} else {
/**********************************/
/* Test if conversion not started */
/**********************************/
if (i_APCI3XXX_TestConversionStarted(dev) == 0) {
devpriv->
ui_EocEosConversionTime
=
(unsigned int)
dw_ReloadValue;
devpriv->
b_EocEosConversionTimeBase
=
b_TimeBase;
devpriv->
b_SingelDiff
=
b_SingleDiff;
devpriv->
b_AiInitialisation
= 1;
/*******************************/
/* Set the convert timing unit */
/*******************************/
writel((unsigned int)b_TimeBase,
devpriv->dw_AiBase + 36);
/**************************/
/* Set the convert timing */
/*************************/
writel(dw_ReloadValue, devpriv->dw_AiBase + 32);
} else {
/**************************/
/* Any conversion started */
/**************************/
printk("Any conversion started\n");
i_ReturnValue =
-10;
}
}
} else {
/*******************************/
/* Single/Diff selection error */
/*******************************/
printk("Single/Diff selection error\n");
i_ReturnValue = -1;
}
} else {
/************************/
/* Time selection error */
/************************/
printk("Convert time value selection error\n");
i_ReturnValue = -3;
}
} else {
/************************/
/* Time selection error */
/************************/
printk("Convert time value selection error\n");
i_ReturnValue = -3;
}
} else {
/*****************************/
/* Time base selection error */
/*****************************/
printk("Convert time base unity selection error\n");
i_ReturnValue = -2;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_InsnConfigAnalogInput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task Converting mode and convert time selection |
+----------------------------------------------------------------------------+
| Input Parameters : b_ConvertMode = (unsigned char) data[0]; |
| b_TimeBase = (unsigned char) data[1]; (0: ns, 1:micros 2:ms)|
| dw_ReloadValue = (unsigned int) data[2]; |
| ........ |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :>0: No error |
| .... |
| -100 : Config command error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnConfigAnalogInput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
switch ((unsigned char) data[0]) {
case APCI3XXX_CONFIGURATION:
i_ReturnValue =
i_APCI3XXX_AnalogInputConfigOperatingMode(dev,
s, insn, data);
break;
default:
i_ReturnValue = -100;
printk("Config command error %d\n", data[0]);
break;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_InsnReadAnalogInput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task Read 1 analog input |
+----------------------------------------------------------------------------+
| Input Parameters : b_Range = CR_RANGE(insn->chanspec); |
| b_Channel = CR_CHAN(insn->chanspec); |
| dw_NbrOfAcquisition = insn->n; |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :>0: No error |
| -3 : Channel selection error |
| -4 : Configuration selection error |
| -10: Any conversion started |
| .... |
| -100 : Config command error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Configuration = (unsigned char) CR_RANGE(insn->chanspec);
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
unsigned int dw_Temp = 0;
unsigned int dw_Configuration = 0;
unsigned int dw_AcquisitionCpt = 0;
unsigned char b_Interrupt = 0;
/*************************************/
/* Test if operating mode configured */
/*************************************/
if (devpriv->b_AiInitialisation) {
/***************************/
/* Test the channel number */
/***************************/
if (((b_Channel < devpriv->s_EeParameters.i_NbrAiChannel)
&& (devpriv->b_SingelDiff == APCI3XXX_SINGLE))
|| ((b_Channel < this_board->i_NbrAiChannelDiff)
&& (devpriv->b_SingelDiff == APCI3XXX_DIFF))) {
/**********************************/
/* Test the channel configuration */
/**********************************/
if (b_Configuration > 7) {
/***************************/
/* Channel not initialised */
/***************************/
i_ReturnValue = -4;
printk("Channel %d range %d selection error\n",
b_Channel, b_Configuration);
}
} else {
/***************************/
/* Channel selection error */
/***************************/
i_ReturnValue = -3;
printk("Channel %d selection error\n", b_Channel);
}
/**************************/
/* Test if no error occur */
/**************************/
if (i_ReturnValue >= 0) {
/************************/
/* Test the buffer size */
/************************/
if ((b_Interrupt != 0) || ((b_Interrupt == 0)
&& (insn->n >= 1))) {
/**********************************/
/* Test if conversion not started */
/**********************************/
if (i_APCI3XXX_TestConversionStarted(dev) == 0) {
/******************/
/* Clear the FIFO */
/******************/
writel(0x10000UL, devpriv->dw_AiBase + 12);
/*******************************/
/* Get and save the delay mode */
/*******************************/
dw_Temp = readl(devpriv->dw_AiBase + 4);
dw_Temp = dw_Temp & 0xFFFFFEF0UL;
/***********************************/
/* Channel configuration selection */
/***********************************/
writel(dw_Temp, devpriv->dw_AiBase + 4);
/**************************/
/* Make the configuration */
/**************************/
dw_Configuration =
(b_Configuration & 3) |
((unsigned int) (b_Configuration >> 2)
<< 6) | ((unsigned int) devpriv->
b_SingelDiff << 7);
/***************************/
/* Write the configuration */
/***************************/
writel(dw_Configuration,
devpriv->dw_AiBase + 0);
/*********************/
/* Channel selection */
/*********************/
writel(dw_Temp | 0x100UL,
devpriv->dw_AiBase + 4);
writel((unsigned int) b_Channel,
devpriv->dw_AiBase + 0);
/***********************/
/* Restore delay mode */
/***********************/
writel(dw_Temp, devpriv->dw_AiBase + 4);
/***********************************/
/* Set the number of sequence to 1 */
/***********************************/
writel(1, devpriv->dw_AiBase + 48);
/***************************/
/* Save the interrupt flag */
/***************************/
devpriv->b_EocEosInterrupt =
b_Interrupt;
/*******************************/
/* Save the number of channels */
/*******************************/
devpriv->ui_AiNbrofChannels = 1;
/******************************/
/* Test if interrupt not used */
/******************************/
if (b_Interrupt == 0) {
for (dw_AcquisitionCpt = 0;
dw_AcquisitionCpt <
insn->n;
dw_AcquisitionCpt++) {
/************************/
/* Start the conversion */
/************************/
writel(0x80000UL, devpriv->dw_AiBase + 8);
/****************/
/* Wait the EOS */
/****************/
do {
dw_Temp = readl(devpriv->dw_AiBase + 20);
dw_Temp = dw_Temp & 1;
} while (dw_Temp != 1);
/*************************/
/* Read the analog value */
/*************************/
data[dw_AcquisitionCpt] = (unsigned int)readl(devpriv->dw_AiBase + 28);
}
} else {
/************************/
/* Start the conversion */
/************************/
writel(0x180000UL, devpriv->dw_AiBase + 8);
}
} else {
/**************************/
/* Any conversion started */
/**************************/
printk("Any conversion started\n");
i_ReturnValue = -10;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
}
} else {
/***************************/
/* Channel selection error */
/***************************/
printk("Operating mode not configured\n");
i_ReturnValue = -1;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function name : void v_APCI3XXX_Interrupt (int irq, |
| void *d) |
+----------------------------------------------------------------------------+
| Task :Interrupt handler for APCI3XXX |
| When interrupt occurs this gets called. |
| First it finds which interrupt has been generated and |
| handles corresponding interrupt |
+----------------------------------------------------------------------------+
| Input Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : - |
+----------------------------------------------------------------------------+
*/
static void v_APCI3XXX_Interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
unsigned char b_CopyCpt = 0;
unsigned int dw_Status = 0;
/***************************/
/* Test if interrupt occur */
/***************************/
dw_Status = readl(devpriv->dw_AiBase + 16);
if ((dw_Status & 0x2UL) == 0x2UL) {
/***********************/
/* Reset the interrupt */
/***********************/
writel(dw_Status, devpriv->dw_AiBase + 16);
/*****************************/
/* Test if interrupt enabled */
/*****************************/
if (devpriv->b_EocEosInterrupt == 1) {
/********************************/
/* Read all analog inputs value */
/********************************/
for (b_CopyCpt = 0;
b_CopyCpt < devpriv->ui_AiNbrofChannels;
b_CopyCpt++) {
devpriv->ui_AiReadData[b_CopyCpt] =
(unsigned int)readl(devpriv->dw_AiBase + 28);
}
/**************************/
/* Set the interrupt flag */
/**************************/
devpriv->b_EocEosInterrupt = 2;
/**********************************************/
/* Send a signal to from kernel to user space */
/**********************************************/
send_sig(SIGIO, devpriv->tsk_Current, 0);
}
}
}
/*
+----------------------------------------------------------------------------+
| ANALOG OUTPUT SUBDEVICE |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_InsnWriteAnalogOutput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task Read 1 analog input |
+----------------------------------------------------------------------------+
| Input Parameters : b_Range = CR_RANGE(insn->chanspec); |
| b_Channel = CR_CHAN(insn->chanspec); |
| data[0] = analog value; |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :>0: No error |
| -3 : Channel selection error |
| -4 : Configuration selection error |
| .... |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnWriteAnalogOutput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned char b_Range = (unsigned char) CR_RANGE(insn->chanspec);
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
unsigned int dw_Status = 0;
int i_ReturnValue = insn->n;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
/***************************/
/* Test the channel number */
/***************************/
if (b_Channel < devpriv->s_EeParameters.i_NbrAoChannel) {
/**********************************/
/* Test the channel configuration */
/**********************************/
if (b_Range < 2) {
/***************************/
/* Set the range selection */
/***************************/
writel(b_Range, devpriv->dw_AiBase + 96);
/**************************************************/
/* Write the analog value to the selected channel */
/**************************************************/
writel((data[0] << 8) | b_Channel,
devpriv->dw_AiBase + 100);
/****************************/
/* Wait the end of transfer */
/****************************/
do {
dw_Status = readl(devpriv->dw_AiBase + 96);
} while ((dw_Status & 0x100) != 0x100);
} else {
/***************************/
/* Channel not initialised */
/***************************/
i_ReturnValue = -4;
printk("Channel %d range %d selection error\n",
b_Channel, b_Range);
}
} else {
/***************************/
/* Channel selection error */
/***************************/
i_ReturnValue = -3;
printk("Channel %d selection error\n", b_Channel);
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
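/*
 * Editorial sketch (not part of the driver): how the analog-output write
 * above drives the hardware.  The range is written to offset 96, the output
 * value and channel number are packed into one word written to offset 100,
 * and offset 96 is then polled until bit 8 (0x100) reports end of transfer.
 * The helper names below are illustrative only.
 */
#include <stdio.h>

static unsigned int ao_pack(unsigned int value, unsigned int channel)
{
        /* value occupies the bits above bit 7, channel the low byte */
        return (value << 8) | channel;
}

static int ao_transfer_done(unsigned int status)
{
        /* the driver busy-waits until bit 8 of the status register is set */
        return (status & 0x100) == 0x100;
}

int main(void)
{
        printf("packed word: 0x%08x\n", ao_pack(0x800, 3));
        printf("done? %d\n", ao_transfer_done(0x100));
        return 0;
}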
/*
+----------------------------------------------------------------------------+
| TTL FUNCTIONS |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_InsnConfigInitTTLIO |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : This function must be called before any other |
| function that accesses the TTL I/O. |
| APCI3XXX_TTL_INIT_DIRECTION_PORT2 (user inputs for direction)|
+----------------------------------------------------------------------------+
| Input Parameters : b_InitType = (unsigned char) data[0]; |
| b_Port2Mode = (unsigned char) data[1]; |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value :>0: No error |
| -1: Port 2 mode selection is wrong |
| .... |
| -100 : Config command error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnConfigInitTTLIO(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Command = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
/*******************/
/* Get the command */
/*******************/
b_Command = (unsigned char) data[0];
/********************/
/* Test the command */
/********************/
if (b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2) {
/***************************************/
/* Test the initialisation buffer size */
/***************************************/
if ((b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2)
&& (insn->n != 2)) {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
} else {
/************************/
/* Config command error */
/************************/
printk("Command selection error\n");
i_ReturnValue = -100;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
/*********************************************************************************/
/* Test if no error occurred and APCI3XXX_TTL_INIT_DIRECTION_PORT2 command selected */
/*********************************************************************************/
if ((i_ReturnValue >= 0)
&& (b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2)) {
/**********************/
/* Test the direction */
/**********************/
if ((data[1] == 0) || (data[1] == 0xFF)) {
/**************************/
/* Save the configuration */
/**************************/
devpriv->ul_TTLPortConfiguration[0] =
devpriv->ul_TTLPortConfiguration[0] | data[1];
} else {
/************************/
/* Port direction error */
/************************/
printk("Port 2 direction selection error\n");
i_ReturnValue = -1;
}
}
/**************************/
/* Test if no error occurred */
/**************************/
if (i_ReturnValue >= 0) {
/***********************************/
/* Test if TTL port initialisation */
/***********************************/
if (b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2) {
/*************************/
/* Set the configuration */
/*************************/
outl(data[1], devpriv->iobase + 224);
}
}
return i_ReturnValue;
}
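/*
 * Editorial sketch (not part of the driver): the port-2 direction setup
 * above only accepts 0x00 (all inputs) or 0xFF (all outputs) and ORs the
 * accepted value into the saved configuration word before writing it to the
 * hardware.  A minimal model of that check, with hypothetical names.
 */
#include <stdio.h>

static int ttl_port2_config(unsigned int *saved_cfg, unsigned int dir)
{
        if (dir != 0x00 && dir != 0xFF)
                return -1;      /* port direction error, as in the driver */
        *saved_cfg |= dir;      /* remember whether port 2 is an output */
        return 0;
}

int main(void)
{
        unsigned int cfg = 0;

        printf("set output: %d, cfg=0x%02X\n", ttl_port2_config(&cfg, 0xFF), cfg);
        printf("bad value : %d\n", ttl_port2_config(&cfg, 0x0F));
        return 0;
}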
/*
+----------------------------------------------------------------------------+
| TTL INPUT FUNCTIONS |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_InsnBitsTTLIO |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Write the selected output mask and read the status from|
| all TTL channels |
+----------------------------------------------------------------------------+
| Input Parameters : dw_ChannelMask = data [0]; |
| dw_BitMask = data [1]; |
+----------------------------------------------------------------------------+
| Output Parameters : data[1] : All TTL channel states |
+----------------------------------------------------------------------------+
| Return Value : >0 : No error |
| -4 : Channel mask error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnBitsTTLIO(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_ChannelCpt = 0;
unsigned int dw_ChannelMask = 0;
unsigned int dw_BitMask = 0;
unsigned int dw_Status = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 2) {
/*******************************/
/* Get the channel and bit mask */
/*******************************/
dw_ChannelMask = data[0];
dw_BitMask = data[1];
/*************************/
/* Test the channel mask */
/*************************/
if (((dw_ChannelMask & 0XFF00FF00) == 0) &&
(((devpriv->ul_TTLPortConfiguration[0] & 0xFF) == 0xFF)
|| (((devpriv->ul_TTLPortConfiguration[0] &
0xFF) == 0)
&& ((dw_ChannelMask & 0XFF0000) ==
0)))) {
/*********************************/
/* Test if set/reset any channel */
/*********************************/
if (dw_ChannelMask) {
/****************************************/
/* Test if set/reset any port 0 channels */
/****************************************/
if (dw_ChannelMask & 0xFF) {
/*******************************************/
/* Read port 0 (first digital output port) */
/*******************************************/
dw_Status = inl(devpriv->iobase + 80);
for (b_ChannelCpt = 0; b_ChannelCpt < 8;
b_ChannelCpt++) {
if ((dw_ChannelMask >>
b_ChannelCpt) &
1) {
dw_Status =
(dw_Status &
(0xFF - (1 << b_ChannelCpt))) | (dw_BitMask & (1 << b_ChannelCpt));
}
}
outl(dw_Status, devpriv->iobase + 80);
}
/****************************************/
/* Test if set/reset any port 2 channels */
/****************************************/
if (dw_ChannelMask & 0xFF0000) {
dw_BitMask = dw_BitMask >> 16;
dw_ChannelMask = dw_ChannelMask >> 16;
/********************************************/
/* Read port 2 (second digital output port) */
/********************************************/
dw_Status = inl(devpriv->iobase + 112);
for (b_ChannelCpt = 0; b_ChannelCpt < 8;
b_ChannelCpt++) {
if ((dw_ChannelMask >>
b_ChannelCpt) &
1) {
dw_Status =
(dw_Status &
(0xFF - (1 << b_ChannelCpt))) | (dw_BitMask & (1 << b_ChannelCpt));
}
}
outl(dw_Status, devpriv->iobase + 112);
}
}
/*******************************************/
/* Read port 0 (first digital output port) */
/*******************************************/
data[1] = inl(devpriv->iobase + 80);
/******************************************/
/* Read port 1 (first digital input port) */
/******************************************/
data[1] = data[1] | (inl(devpriv->iobase + 64) << 8);
/************************/
/* Test if port 2 input */
/************************/
if ((devpriv->ul_TTLPortConfiguration[0] & 0xFF) == 0) {
data[1] =
data[1] | (inl(devpriv->iobase +
96) << 16);
} else {
data[1] =
data[1] | (inl(devpriv->iobase +
112) << 16);
}
} else {
/**********************/
/* Channel mask error */
/**********************/
printk("Channel mask error\n");
i_ReturnValue = -4;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
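/*
 * Editorial sketch (not part of the driver): the read-modify-write applied
 * above to every selected TTL channel.  For each bit set in the channel
 * mask, the old state bit is cleared and replaced by the corresponding bit
 * from the bit mask; 0xFF - (1 << ch) is simply ~(1 << ch) restricted to the
 * eight channels of one port.
 */
#include <stdio.h>

static unsigned int ttl_apply_mask(unsigned int port_state,
                                   unsigned int channel_mask,
                                   unsigned int bit_mask)
{
        unsigned int ch;

        for (ch = 0; ch < 8; ch++) {
                if ((channel_mask >> ch) & 1)
                        port_state = (port_state & (0xFF - (1 << ch))) |
                                     (bit_mask & (1 << ch));
        }
        return port_state;
}

int main(void)
{
        /* set channel 0, clear channel 3, leave the rest untouched */
        printf("0x%02X\n", ttl_apply_mask(0x0A, 0x09, 0x01)); /* -> 0x03 */
        return 0;
}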
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_InsnReadTTLIO |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Read the status from selected channel |
+----------------------------------------------------------------------------+
| Input Parameters : b_Channel = CR_CHAN(insn->chanspec) |
+----------------------------------------------------------------------------+
| Output Parameters : data[0] : Selected TTL channel state |
+----------------------------------------------------------------------------+
| Return Value : 0 : No error |
| -3 : Channel selection error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnReadTTLIO(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
int i_ReturnValue = insn->n;
unsigned int *pls_ReadData = data;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
/***********************/
/* Test if read port 0 */
/***********************/
if (b_Channel < 8) {
/*******************************************/
/* Read port 0 (first digital output port) */
/*******************************************/
pls_ReadData[0] = inl(devpriv->iobase + 80);
pls_ReadData[0] = (pls_ReadData[0] >> b_Channel) & 1;
} else {
/***********************/
/* Test if read port 1 */
/***********************/
if ((b_Channel > 7) && (b_Channel < 16)) {
/******************************************/
/* Read port 1 (first digital input port) */
/******************************************/
pls_ReadData[0] = inl(devpriv->iobase + 64);
pls_ReadData[0] =
(pls_ReadData[0] >> (b_Channel -
8)) & 1;
} else {
/***********************/
/* Test if read port 2 */
/***********************/
if ((b_Channel > 15) && (b_Channel < 24)) {
/************************/
/* Test if port 2 input */
/************************/
if ((devpriv->ul_TTLPortConfiguration[0]
& 0xFF) == 0) {
pls_ReadData[0] =
inl(devpriv->iobase +
96);
pls_ReadData[0] =
(pls_ReadData[0] >>
(b_Channel - 16)) & 1;
} else {
pls_ReadData[0] =
inl(devpriv->iobase +
112);
pls_ReadData[0] =
(pls_ReadData[0] >>
(b_Channel - 16)) & 1;
}
} else {
/***************************/
/* Channel selection error */
/***************************/
i_ReturnValue = -3;
printk("Channel %d selection error\n",
b_Channel);
}
}
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
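/*
 * Editorial sketch (not part of the driver): the TTL channel numbering the
 * read function above decodes.  Channels 0-7 live on port 0 (offset 80,
 * output), channels 8-15 on port 1 (offset 64, input) and channels 16-23 on
 * port 2, which is read at offset 96 when configured as input and at offset
 * 112 when configured as output.  Offsets are taken from the code above;
 * the struct and function names are illustrative only.
 */
#include <stdio.h>

struct ttl_port {
        unsigned int offset;    /* register offset within the I/O region */
        unsigned int shift;     /* bit position of the channel within the port */
};

static int ttl_channel_to_port(unsigned int channel, int port2_is_output,
                               struct ttl_port *out)
{
        if (channel < 8) {
                out->offset = 80;
                out->shift = channel;
        } else if (channel < 16) {
                out->offset = 64;
                out->shift = channel - 8;
        } else if (channel < 24) {
                out->offset = port2_is_output ? 112 : 96;
                out->shift = channel - 16;
        } else {
                return -3;      /* channel selection error, as in the driver */
        }
        return 0;
}

int main(void)
{
        struct ttl_port p;

        if (ttl_channel_to_port(18, 0, &p) == 0)
                printf("channel 18 -> offset %u, bit %u\n", p.offset, p.shift);
        return 0;
}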
/*
+----------------------------------------------------------------------------+
| TTL OUTPUT FUNCTIONS |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_InsnWriteTTLIO |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Set the state from TTL output channel |
+----------------------------------------------------------------------------+
| Input Parameters : b_Channel = CR_CHAN(insn->chanspec) |
| b_State = data [0] |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0 : No error |
| -3 : Channel selection error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnWriteTTLIO(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
unsigned char b_State = 0;
unsigned int dw_Status = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
b_State = (unsigned char) data[0];
/***********************/
/* Test if write port 0 */
/***********************/
if (b_Channel < 8) {
/*****************************************************************************/
/* Read port 0 (first digital output port) and set/reset the selected channel */
/*****************************************************************************/
dw_Status = inl(devpriv->iobase + 80);
dw_Status =
(dw_Status & (0xFF -
(1 << b_Channel))) | ((b_State & 1) <<
b_Channel);
outl(dw_Status, devpriv->iobase + 80);
} else {
/***********************/
/* Test if write port 2 */
/***********************/
if ((b_Channel > 15) && (b_Channel < 24)) {
/*************************/
/* Test if port 2 output */
/*************************/
if ((devpriv->ul_TTLPortConfiguration[0] & 0xFF)
== 0xFF) {
/*****************************************************************************/
/* Read port 2 (second digital output port) and set/reset the selected channel */
/*****************************************************************************/
dw_Status = inl(devpriv->iobase + 112);
dw_Status =
(dw_Status & (0xFF -
(1 << (b_Channel -
16)))) |
((b_State & 1) << (b_Channel -
16));
outl(dw_Status, devpriv->iobase + 112);
} else {
/***************************/
/* Channel selection error */
/***************************/
i_ReturnValue = -3;
printk("Channel %d selection error\n",
b_Channel);
}
} else {
/***************************/
/* Channel selection error */
/***************************/
i_ReturnValue = -3;
printk("Channel %d selection error\n",
b_Channel);
}
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| DIGITAL INPUT SUBDEVICE |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function name :int i_APCI3XXX_InsnReadDigitalInput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Reads the value of the specified Digital input channel |
+----------------------------------------------------------------------------+
| Input Parameters : b_Channel = CR_CHAN(insn->chanspec) (0 to 3) |
+----------------------------------------------------------------------------+
| Output Parameters : data[0] : Channel value |
+----------------------------------------------------------------------------+
| Return Value : 0 : No error |
| -3 : Channel selection error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnReadDigitalInput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
unsigned int dw_Temp = 0;
/***************************/
/* Test the channel number */
/***************************/
if (b_Channel < devpriv->s_EeParameters.i_NbrDiChannel) {
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
dw_Temp = inl(devpriv->iobase + 32);
*data = (dw_Temp >> b_Channel) & 1;
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
} else {
/***************************/
/* Channel selection error */
/***************************/
printk("Channel selection error\n");
i_ReturnValue = -3;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function name :int i_APCI3XXX_InsnBitsDigitalInput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Reads the value of the digital input port (4 channels) |
+----------------------------------------------------------------------------+
| Input Parameters : - |
+----------------------------------------------------------------------------+
| Output Parameters : data[0] : Port value |
+----------------------------------------------------------------------------+
| Return Value :>0: No error |
| .... |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnBitsDigitalInput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned int dw_Temp = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
dw_Temp = inl(devpriv->iobase + 32);
*data = dw_Temp & 0xf;
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| DIGITAL OUTPUT SUBDEVICE |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function name :int i_APCI3XXX_InsnBitsDigitalOutput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Write the selected output mask and read the status from|
| all digital output channels |
+----------------------------------------------------------------------------+
| Input Parameters : dw_ChannelMask = data [0]; |
| dw_BitMask = data [1]; |
+----------------------------------------------------------------------------+
| Output Parameters : data[1] : All digital output channel states |
+----------------------------------------------------------------------------+
| Return Value : >0 : No error |
| -4 : Channel mask error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnBitsDigitalOutput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_ChannelCpt = 0;
unsigned int dw_ChannelMask = 0;
unsigned int dw_BitMask = 0;
unsigned int dw_Status = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 2) {
/*******************************/
/* Get the channel and bit mask */
/*******************************/
dw_ChannelMask = data[0];
dw_BitMask = data[1];
/*************************/
/* Test the channel mask */
/*************************/
if ((dw_ChannelMask & 0XFFFFFFF0) == 0) {
/*********************************/
/* Test if set/reset any channel */
/*********************************/
if (dw_ChannelMask & 0xF) {
/********************************/
/* Read the digital output port */
/********************************/
dw_Status = inl(devpriv->iobase + 48);
for (b_ChannelCpt = 0; b_ChannelCpt < 4;
b_ChannelCpt++) {
if ((dw_ChannelMask >> b_ChannelCpt) &
1) {
dw_Status =
(dw_Status & (0xF -
(1 << b_ChannelCpt))) | (dw_BitMask & (1 << b_ChannelCpt));
}
}
outl(dw_Status, devpriv->iobase + 48);
}
/********************************/
/* Read the digital output port */
/********************************/
data[1] = inl(devpriv->iobase + 48);
} else {
/**********************/
/* Channel mask error */
/**********************/
printk("Channel mask error\n");
i_ReturnValue = -4;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function name :int i_APCI3XXX_InsnWriteDigitalOutput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Set the state from digital output channel |
+----------------------------------------------------------------------------+
| Input Parameters : b_Channel = CR_CHAN(insn->chanspec) |
| b_State = data [0] |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : >0 : No error |
| -3 : Channel selection error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnWriteDigitalOutput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = CR_CHAN(insn->chanspec);
unsigned char b_State = 0;
unsigned int dw_Status = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
/***************************/
/* Test the channel number */
/***************************/
if (b_Channel < devpriv->s_EeParameters.i_NbrDoChannel) {
/*******************/
/* Get the command */
/*******************/
b_State = (unsigned char) data[0];
/********************************/
/* Read the digital output port */
/********************************/
dw_Status = inl(devpriv->iobase + 48);
dw_Status =
(dw_Status & (0xF -
(1 << b_Channel))) | ((b_State & 1) <<
b_Channel);
outl(dw_Status, devpriv->iobase + 48);
} else {
/***************************/
/* Channel selection error */
/***************************/
printk("Channel selection error\n");
i_ReturnValue = -3;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
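/*
 * Editorial sketch (not part of the driver): the single-bit update used by
 * the digital-output write above.  The current port image is read back, the
 * selected channel bit is cleared with 0xF - (1 << ch) (i.e. ~(1 << ch) over
 * the four channels) and replaced by the requested state.
 */
#include <stdio.h>

static unsigned int do_set_channel(unsigned int port_state,
                                   unsigned int channel, unsigned int state)
{
        return (port_state & (0xF - (1 << channel))) | ((state & 1) << channel);
}

int main(void)
{
        unsigned int port = 0x5;                /* channels 0 and 2 high */

        port = do_set_channel(port, 0, 0);      /* clear channel 0 -> 0x4 */
        port = do_set_channel(port, 3, 1);      /* set channel 3   -> 0xC */
        printf("0x%X\n", port);
        return 0;
}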
/*
+----------------------------------------------------------------------------+
| Function name :int i_APCI3XXX_InsnReadDigitalOutput |
| (struct comedi_device *dev, |
| struct comedi_subdevice *s, |
| struct comedi_insn *insn, |
| unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Read the state from digital output channel |
+----------------------------------------------------------------------------+
| Input Parameters : b_Channel = CR_CHAN(insn->chanspec) |
+----------------------------------------------------------------------------+
| Output Parameters : b_State = data [0] |
+----------------------------------------------------------------------------+
| Return Value : >0 : No error |
| -3 : Channel selection error |
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_InsnReadDigitalOutput(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = CR_CHAN(insn->chanspec);
unsigned int dw_Status = 0;
/************************/
/* Test the buffer size */
/************************/
if (insn->n >= 1) {
/***************************/
/* Test the channel number */
/***************************/
if (b_Channel < devpriv->s_EeParameters.i_NbrDoChannel) {
/********************************/
/* Read the digital output port */
/********************************/
dw_Status = inl(devpriv->iobase + 48);
dw_Status = (dw_Status >> b_Channel) & 1;
*data = dw_Status;
} else {
/***************************/
/* Channel selection error */
/***************************/
printk("Channel selection error\n");
i_ReturnValue = -3;
}
} else {
/*******************/
/* Data size error */
/*******************/
printk("Buffer size error\n");
i_ReturnValue = -101;
}
return i_ReturnValue;
}
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI3XXX_Reset(struct comedi_device *dev) |
+----------------------------------------------------------------------------+
| Task : Resets all the registers |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : - |
+----------------------------------------------------------------------------+
*/
static int i_APCI3XXX_Reset(struct comedi_device *dev)
{
unsigned char b_Cpt = 0;
/*************************/
/* Disable the interrupt */
/*************************/
disable_irq(dev->irq);
/****************************/
/* Reset the interrupt flag */
/****************************/
devpriv->b_EocEosInterrupt = 0;
/***************************/
/* Clear the start command */
/***************************/
writel(0, devpriv->dw_AiBase + 8);
/*****************************/
/* Reset the interrupt flags */
/*****************************/
writel(readl(devpriv->dw_AiBase + 16), devpriv->dw_AiBase + 16);
/*****************/
/* clear the EOS */
/*****************/
readl(devpriv->dw_AiBase + 20);
/******************/
/* Clear the FIFO */
/******************/
for (b_Cpt = 0; b_Cpt < 16; b_Cpt++) {
readl(devpriv->dw_AiBase + 28);
}
/************************/
/* Enable the interrupt */
/************************/
enable_irq(dev->irq);
return 0;
}
| gpl-2.0 |
revjunkie/galbi-g2 | drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c | 8268 | 24566 | /******************************************************************************
Copyright(c) 2004 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
<jkmaline@cc.hut.fi>
Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
******************************************************************************/
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "ieee80211.h"
struct modes_unit {
char *mode_string;
int mode_size;
};
struct modes_unit ieee80211_modes[] = {
{"a",1},
{"b",1},
{"g",1},
{"?",1},
{"N-24G",5},
{"N-5G",4},
};
#define iwe_stream_add_event_rsl iwe_stream_add_event
#define MAX_CUSTOM_LEN 64
static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
char *start, char *stop,
struct ieee80211_network *network,
struct iw_request_info *info)
{
char custom[MAX_CUSTOM_LEN];
char proto_name[IFNAMSIZ];
char *pname = proto_name;
char *p;
struct iw_event iwe;
int i, j;
u16 max_rate, rate;
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_ADDR_LEN);
/* Remaining entries will be displayed in the order we provide them */
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
// if (network->flags & NETWORK_EMPTY_ESSID) {
if (network->ssid_len == 0) {
iwe.u.data.length = sizeof("<hidden>");
start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
} else {
iwe.u.data.length = min(network->ssid_len, (u8)32);
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
if(network->mode&(1<<i)) {
sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
pname +=ieee80211_modes[i].mode_size;
}
}
*pname = '\0';
snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name);
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_CHAR_LEN);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
if (network->capability &
(WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
if (network->capability & WLAN_CAPABILITY_BSS)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_UINT_LEN);
}
/* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
iwe.u.freq.e = 3; */
iwe.u.freq.m = network->channel;
iwe.u.freq.e = 0;
iwe.u.freq.i = 0;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_FREQ_LEN);
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
if (network->capability & WLAN_CAPABILITY_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
/* Add basic and extended rates */
max_rate = 0;
p = custom;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
for (i = 0, j = 0; i < network->rates_len; ) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
(network->rates[i] & 0x7F)))
rate = network->rates_ex[j++] & 0x7F;
else
rate = network->rates[i++] & 0x7F;
if (rate > max_rate)
max_rate = rate;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
if (rate > max_rate)
max_rate = rate;
}
if (network->mode >= IEEE_N_24G)//add N rate here;
{
PHT_CAPABILITY_ELE ht_cap = NULL;
bool is40M = false, isShortGI = false;
u8 max_mcs = 0;
if (!memcmp(network->bssht.bdHTCapBuf, EWC11NHTCap, 4))
ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[4];
else
ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[0];
is40M = (ht_cap->ChlWidth)?1:0;
isShortGI = (ht_cap->ChlWidth)?
((ht_cap->ShortGI40Mhz)?1:0):
((ht_cap->ShortGI20Mhz)?1:0);
max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS, MCS_FILTER_ALL);
rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs&0x7f];
if (rate > max_rate)
max_rate = rate;
}
iwe.cmd = SIOCGIWRATE;
iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
iwe.u.bitrate.value = max_rate * 500000;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
IW_EV_PARAM_LEN);
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
/* Add quality statistics */
/* TODO: Fix these values... */
iwe.cmd = IWEVQUAL;
iwe.u.qual.qual = network->stats.signal;
iwe.u.qual.level = network->stats.rssi;
iwe.u.qual.noise = network->stats.noise;
iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
iwe.u.qual.updated = 7;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_QUAL_LEN);
iwe.cmd = IWEVCUSTOM;
p = custom;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
#if (WIRELESS_EXT < 18)
if (ieee->wpa_enabled && network->wpa_ie_len){
char buf[MAX_WPA_IE_LEN * 2 + 30];
// printk("WPA IE\n");
u8 *p = buf;
p += sprintf(p, "wpa_ie=");
for (i = 0; i < network->wpa_ie_len; i++) {
p += sprintf(p, "%02x", network->wpa_ie[i]);
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = strlen(buf);
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
if (ieee->wpa_enabled && network->rsn_ie_len){
char buf[MAX_WPA_IE_LEN * 2 + 30];
u8 *p = buf;
p += sprintf(p, "rsn_ie=");
for (i = 0; i < network->rsn_ie_len; i++) {
p += sprintf(p, "%02x", network->rsn_ie[i]);
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = strlen(buf);
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
#else
memset(&iwe, 0, sizeof(iwe));
if (network->wpa_ie_len)
{
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->wpa_ie, network->wpa_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->wpa_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
memset(&iwe, 0, sizeof(iwe));
if (network->rsn_ie_len)
{
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->rsn_ie, network->rsn_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->rsn_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
#endif
/* Add EXTRA: Age to display seconds since last beacon/probe response
* for given network. */
iwe.cmd = IWEVCUSTOM;
p = custom;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
return start;
}
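/*
 * Editorial sketch (not part of the driver): how the scan translation above
 * prints the supported and extended rates.  Rates are carried in units of
 * 500 kb/s with the high bit flagging a basic rate, so the code masks with
 * 0x7F, prints rate/2 and appends ".5" for odd values.  Purely illustrative.
 */
#include <stdio.h>

static void print_rate(unsigned char raw)
{
        unsigned char rate = raw & 0x7F;        /* strip the "basic rate" flag */

        printf("%d%s Mb/s\n", rate >> 1, (rate & 1) ? ".5" : "");
}

int main(void)
{
        print_rate(0x82);       /* basic 1 Mb/s */
        print_rate(0x0B);       /* 5.5 Mb/s     */
        print_rate(0x6C);       /* 54 Mb/s      */
        return 0;
}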
int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct ieee80211_network *network;
unsigned long flags;
char *ev = extra;
// char *stop = ev + IW_SCAN_MAX_DATA;
char *stop = ev + wrqu->data.length;//IW_SCAN_MAX_DATA;
//char *stop = ev + IW_SCAN_MAX_DATA;
int i = 0;
int err = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
down(&ieee->wx_sem);
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(network, &ieee->network_list, list) {
i++;
if((stop-ev)<200)
{
err = -E2BIG;
break;
}
if (ieee->scan_age == 0 ||
time_after(network->last_scanned + ieee->scan_age, jiffies))
ev = rtl819x_translate_scan(ieee, ev, stop, network, info);
else
IEEE80211_DEBUG_SCAN(
"Not showing network '%s ("
"%pM)' due to age (%lums).\n",
escape_essid(network->ssid,
network->ssid_len),
network->bssid,
(jiffies - network->last_scanned) / (HZ / 100));
}
spin_unlock_irqrestore(&ieee->lock, flags);
up(&ieee->wx_sem);
wrqu->data.length = ev - extra;
wrqu->data.flags = 0;
IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
return err;
}
int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
struct iw_point *erq = &(wrqu->encoding);
struct net_device *dev = ieee->dev;
struct ieee80211_security sec = {
.flags = 0
};
int i, key, key_provided, len;
struct ieee80211_crypt_data **crypt;
IEEE80211_DEBUG_WX("SET_ENCODE\n");
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
if (key > WEP_KEYS)
return -EINVAL;
key--;
key_provided = 1;
} else {
key_provided = 0;
key = ieee->tx_keyidx;
}
IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
"provided" : "default");
crypt = &ieee->crypt[key];
if (erq->flags & IW_ENCODE_DISABLED) {
if (key_provided && *crypt) {
IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
key);
ieee80211_crypt_delayed_deinit(ieee, crypt);
} else
IEEE80211_DEBUG_WX("Disabling encryption.\n");
/* Check all the keys to see if any are still configured,
* and if no key index was provided, de-init them all */
for (i = 0; i < WEP_KEYS; i++) {
if (ieee->crypt[i] != NULL) {
if (key_provided)
break;
ieee80211_crypt_delayed_deinit(
ieee, &ieee->crypt[i]);
}
}
if (i == WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
}
goto done;
}
sec.enabled = 1;
sec.flags |= SEC_ENABLED;
if (*crypt != NULL && (*crypt)->ops != NULL &&
strcmp((*crypt)->ops->name, "WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm
* on this key */
ieee80211_crypt_delayed_deinit(ieee, crypt);
}
if (*crypt == NULL) {
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops) {
request_module("ieee80211_crypt_wep");
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
}
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(key);
if (!new_crypt->ops || !new_crypt->priv) {
kfree(new_crypt);
new_crypt = NULL;
printk(KERN_WARNING "%s: could not initialize WEP: "
"load module ieee80211_crypt_wep\n",
dev->name);
return -EOPNOTSUPP;
}
*crypt = new_crypt;
}
/* If a new key was provided, set it up */
if (erq->length > 0) {
len = erq->length <= 5 ? 5 : 13;
memcpy(sec.keys[key], keybuf, erq->length);
if (len > erq->length)
memset(sec.keys[key] + erq->length, 0,
len - erq->length);
IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
key, escape_essid(sec.keys[key], len),
erq->length, len);
sec.key_sizes[key] = len;
(*crypt)->ops->set_key(sec.keys[key], len, NULL,
(*crypt)->priv);
sec.flags |= (1 << key);
/* This ensures a key will be activated if no key is
* explicitly set */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
ieee->tx_keyidx = key;
} else {
len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
NULL, (*crypt)->priv);
if (len == 0) {
/* Set a default key of all 0 */
printk("Setting key %d to all zero.\n",
key);
IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
key);
memset(sec.keys[key], 0, 13);
(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
(*crypt)->priv);
sec.key_sizes[key] = 13;
sec.flags |= (1 << key);
}
/* No key data - just set the default TX key index */
if (key_provided) {
IEEE80211_DEBUG_WX(
"Setting key %d to default Tx key.\n", key);
ieee->tx_keyidx = key;
sec.active_key = key;
sec.flags |= SEC_ACTIVE_KEY;
}
}
done:
ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
ieee->auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
sec.flags |= SEC_AUTH_MODE;
IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
"OPEN" : "SHARED KEY");
/* For now we just support WEP, so only set that security level...
* TODO: When WPA is added this is one place that needs to change */
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
if (ieee->set_security)
ieee->set_security(dev, &sec);
/* Do not reset port if card is in Managed mode since resetting will
* generate new IEEE 802.11 authentication which may end up in looping
* with IEEE 802.1X. If your hardware requires a reset after WEP
* configuration (for example... Prism2), implement the reset_port in
* the callbacks structures used to initialize the 802.11 stack. */
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
return -EINVAL;
}
return 0;
}
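/*
 * Editorial sketch (not part of the driver): the WEP key-length rule used in
 * ieee80211_wx_set_encode() above.  Anything up to 5 bytes is stored as a
 * 40-bit key (5 bytes), anything longer as a 104-bit key (13 bytes), and
 * short keys are zero-padded to that length.  Illustrative only.
 */
#include <stdio.h>
#include <string.h>

static int wep_store_key(unsigned char *slot, const unsigned char *key,
                         int key_len)
{
        int len = key_len <= 5 ? 5 : 13;        /* WEP-40 or WEP-104 */

        memcpy(slot, key, key_len);
        if (len > key_len)
                memset(slot + key_len, 0, len - key_len);       /* zero padding */
        return len;
}

int main(void)
{
        unsigned char slot[13];
        int len = wep_store_key(slot, (const unsigned char *)"abc", 3);

        printf("stored as %d-byte (%d-bit) key\n", len, len * 8);
        return 0;
}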
int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
struct ieee80211_crypt_data *crypt;
IEEE80211_DEBUG_WX("GET_ENCODE\n");
if(ieee->iw_mode == IW_MODE_MONITOR)
return -1;
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
if (key > WEP_KEYS)
return -EINVAL;
key--;
} else
key = ieee->tx_keyidx;
crypt = ieee->crypt[key];
erq->flags = key + 1;
if (crypt == NULL || crypt->ops == NULL) {
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
return 0;
}
len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
erq->length = (len >= 0 ? len : 0);
erq->flags |= IW_ENCODE_ENABLED;
if (ieee->open_wep)
erq->flags |= IW_ENCODE_OPEN;
else
erq->flags |= IW_ENCODE_RESTRICTED;
return 0;
}
#if (WIRELESS_EXT >= 18)
int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
struct net_device *dev = ieee->dev;
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int i, idx;
int group_key = 0;
const char *alg, *module;
struct ieee80211_crypto_ops *ops;
struct ieee80211_crypt_data **crypt;
struct ieee80211_security sec = {
.flags = 0,
};
//printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg);
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > WEP_KEYS)
return -EINVAL;
idx--;
} else
idx = ieee->tx_keyidx;
if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
crypt = &ieee->crypt[idx];
group_key = 1;
} else {
/* some Cisco APs use idx>0 for unicast in dynamic WEP */
//printk("not group key, flags:%x, ext->alg:%d\n", ext->ext_flags, ext->alg);
if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
return -EINVAL;
if (ieee->iw_mode == IW_MODE_INFRA)
crypt = &ieee->crypt[idx];
else
return -EINVAL;
}
sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
ieee80211_crypt_delayed_deinit(ieee, crypt);
for (i = 0; i < WEP_KEYS; i++)
if (ieee->crypt[i] != NULL)
break;
if (i == WEP_KEYS) {
sec.enabled = 0;
// sec.encrypt = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
}
//printk("disabled: flag:%x\n", encoding->flags);
goto done;
}
sec.enabled = 1;
// sec.encrypt = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
alg = "WEP";
module = "ieee80211_crypt_wep";
break;
case IW_ENCODE_ALG_TKIP:
alg = "TKIP";
module = "ieee80211_crypt_tkip";
break;
case IW_ENCODE_ALG_CCMP:
alg = "CCMP";
module = "ieee80211_crypt_ccmp";
break;
default:
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
ret = -EINVAL;
goto done;
}
printk("alg name:%s\n",alg);
ops = ieee80211_get_crypto_ops(alg);
if (ops == NULL) {
request_module(module);
ops = ieee80211_get_crypto_ops(alg);
}
if (ops == NULL) {
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
printk("========>unknown crypto alg %d\n", ext->alg);
ret = -EINVAL;
goto done;
}
if (*crypt == NULL || (*crypt)->ops != ops) {
struct ieee80211_crypt_data *new_crypt;
ieee80211_crypt_delayed_deinit(ieee, crypt);
new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
new_crypt->ops = ops;
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(idx);
if (new_crypt->priv == NULL) {
kfree(new_crypt);
ret = -EINVAL;
goto done;
}
*crypt = new_crypt;
}
if (ext->key_len > 0 && (*crypt)->ops->set_key &&
(*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
(*crypt)->priv) < 0) {
IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
printk("key setting failed\n");
ret = -EINVAL;
goto done;
}
//skip_host_crypt:
//printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags);
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
ieee->tx_keyidx = idx;
sec.active_key = idx;
sec.flags |= SEC_ACTIVE_KEY;
}
if (ext->alg != IW_ENCODE_ALG_NONE) {
//memcpy(sec.keys[idx], ext->key, ext->key_len);
sec.key_sizes[idx] = ext->key_len;
sec.flags |= (1 << idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
// sec.encode_alg[idx] = SEC_ALG_WEP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
// sec.encode_alg[idx] = SEC_ALG_TKIP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
// sec.encode_alg[idx] = SEC_ALG_CCMP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
/* Don't set sec level for group keys. */
if (group_key)
sec.flags &= ~SEC_LEVEL;
}
done:
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
return -EINVAL;
}
return ret;
}
int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
struct ieee80211_crypt_data *crypt;
int idx, max_key_len;
max_key_len = encoding->length - sizeof(*ext);
if (max_key_len < 0)
return -EINVAL;
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > WEP_KEYS)
return -EINVAL;
idx--;
} else
idx = ieee->tx_keyidx;
if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
ext->alg != IW_ENCODE_ALG_WEP)
if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
return -EINVAL;
crypt = ieee->crypt[idx];
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
if (crypt == NULL || crypt->ops == NULL ) {
ext->alg = IW_ENCODE_ALG_NONE;
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
if (strcmp(crypt->ops->name, "WEP") == 0 )
ext->alg = IW_ENCODE_ALG_WEP;
else if (strcmp(crypt->ops->name, "TKIP"))
ext->alg = IW_ENCODE_ALG_TKIP;
else if (strcmp(crypt->ops->name, "CCMP"))
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
ext->key_len = crypt->ops->get_key(ext->key, SCM_KEY_LEN, NULL, crypt->priv);
encoding->flags |= IW_ENCODE_ENABLED;
if (ext->key_len &&
(ext->alg == IW_ENCODE_ALG_TKIP ||
ext->alg == IW_ENCODE_ALG_CCMP))
ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
}
return 0;
}
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_mlme *mlme = (struct iw_mlme *) extra;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
ieee80211_disassociate(ieee);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
/*need to support wpa2 here*/
//printk("wpa version:%x\n", data->value);
break;
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
/*
* Host AP driver does not use these parameters and allows
* wpa_supplicant to control them internally.
*/
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
ieee->tkip_countermeasures = data->value;
break;
case IW_AUTH_DROP_UNENCRYPTED:
ieee->drop_unencrypted = data->value;
break;
case IW_AUTH_80211_AUTH_ALG:
//printk("======>%s():data->value is %d\n",__FUNCTION__,data->value);
// ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0;
if(data->value & IW_AUTH_ALG_SHARED_KEY){
ieee->open_wep = 0;
ieee->auth_mode = 1;
}
else if(data->value & IW_AUTH_ALG_OPEN_SYSTEM){
ieee->open_wep = 1;
ieee->auth_mode = 0;
}
else if(data->value & IW_AUTH_ALG_LEAP){
ieee->open_wep = 1;
ieee->auth_mode = 2;
//printk("hahahaa:LEAP\n");
}
else
return -EINVAL;
//printk("open_wep:%d\n", ieee->open_wep);
break;
case IW_AUTH_WPA_ENABLED:
ieee->wpa_enabled = (data->value)?1:0;
//printk("enalbe wpa:%d\n", ieee->wpa_enabled);
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
ieee->ieee802_1x = data->value;
break;
case IW_AUTH_PRIVACY_INVOKED:
ieee->privacy_invoked = data->value;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
#endif
int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
u8 *buf;
if (len>MAX_WPA_IE_LEN || (len && ie == NULL))
{
// printk("return error out, len:%d\n", len);
return -EINVAL;
}
if (len)
{
if (len != ie[1]+2)
{
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
}
else{
kfree(ieee->wpa_ie);
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
}
return 0;
}
EXPORT_SYMBOL(ieee80211_wx_set_gen_ie);
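/*
 * Editorial sketch (not part of the driver): the sanity check performed by
 * ieee80211_wx_set_gen_ie() above.  A generic information element starts
 * with a one-byte element ID and a one-byte payload length, so the total
 * buffer length must equal ie[1] + 2.  Illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

static int gen_ie_valid(const unsigned char *ie, size_t len)
{
        if (len == 0)
                return 1;                       /* clearing the IE is allowed */
        if (len < 2 || len != (size_t)ie[1] + 2)
                return 0;                       /* truncated or inconsistent */
        return 1;
}

int main(void)
{
        const unsigned char wpa_ie[] = { 0xDD, 0x04, 0x00, 0x50, 0xF2, 0x01 };

        printf("valid: %d\n", gen_ie_valid(wpa_ie, sizeof(wpa_ie)));
        return 0;
}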
#if (WIRELESS_EXT >= 18)
EXPORT_SYMBOL(ieee80211_wx_set_mlme);
EXPORT_SYMBOL(ieee80211_wx_set_auth);
EXPORT_SYMBOL(ieee80211_wx_set_encode_ext);
EXPORT_SYMBOL(ieee80211_wx_get_encode_ext);
#endif
EXPORT_SYMBOL(ieee80211_wx_get_scan);
EXPORT_SYMBOL(ieee80211_wx_set_encode);
EXPORT_SYMBOL(ieee80211_wx_get_encode);
| gpl-2.0 |
haiviet2011/android_kernel_ef47s | drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c | 8268 | 24566 | /******************************************************************************
Copyright(c) 2004 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
<jkmaline@cc.hut.fi>
Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
******************************************************************************/
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "ieee80211.h"
struct modes_unit {
char *mode_string;
int mode_size;
};
struct modes_unit ieee80211_modes[] = {
{"a",1},
{"b",1},
{"g",1},
{"?",1},
{"N-24G",5},
{"N-5G",4},
};
#define iwe_stream_add_event_rsl iwe_stream_add_event
#define MAX_CUSTOM_LEN 64
static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
char *start, char *stop,
struct ieee80211_network *network,
struct iw_request_info *info)
{
char custom[MAX_CUSTOM_LEN];
char proto_name[IFNAMSIZ];
char *pname = proto_name;
char *p;
struct iw_event iwe;
int i, j;
u16 max_rate, rate;
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_ADDR_LEN);
/* Remaining entries will be displayed in the order we provide them */
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
// if (network->flags & NETWORK_EMPTY_ESSID) {
if (network->ssid_len == 0) {
iwe.u.data.length = sizeof("<hidden>");
start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
} else {
iwe.u.data.length = min(network->ssid_len, (u8)32);
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
if(network->mode&(1<<i)) {
sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
pname +=ieee80211_modes[i].mode_size;
}
}
*pname = '\0';
snprintf(iwe.u.name, IFNAMSIZ, "IEEE802.11%s", proto_name);
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_CHAR_LEN);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
if (network->capability &
(WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
if (network->capability & WLAN_CAPABILITY_BSS)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_UINT_LEN);
}
/* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
iwe.u.freq.e = 3; */
iwe.u.freq.m = network->channel;
iwe.u.freq.e = 0;
iwe.u.freq.i = 0;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_FREQ_LEN);
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
if (network->capability & WLAN_CAPABILITY_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
/* Add basic and extended rates */
max_rate = 0;
p = custom;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
for (i = 0, j = 0; i < network->rates_len; ) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
(network->rates[i] & 0x7F)))
rate = network->rates_ex[j++] & 0x7F;
else
rate = network->rates[i++] & 0x7F;
if (rate > max_rate)
max_rate = rate;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
if (rate > max_rate)
max_rate = rate;
}
if (network->mode >= IEEE_N_24G)//add N rate here;
{
PHT_CAPABILITY_ELE ht_cap = NULL;
bool is40M = false, isShortGI = false;
u8 max_mcs = 0;
if (!memcmp(network->bssht.bdHTCapBuf, EWC11NHTCap, 4))
ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[4];
else
ht_cap = (PHT_CAPABILITY_ELE)&network->bssht.bdHTCapBuf[0];
is40M = (ht_cap->ChlWidth)?1:0;
isShortGI = (ht_cap->ChlWidth)?
((ht_cap->ShortGI40Mhz)?1:0):
((ht_cap->ShortGI20Mhz)?1:0);
max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS, MCS_FILTER_ALL);
rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs&0x7f];
if (rate > max_rate)
max_rate = rate;
}
iwe.cmd = SIOCGIWRATE;
iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
iwe.u.bitrate.value = max_rate * 500000;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe,
IW_EV_PARAM_LEN);
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
/* Add quality statistics */
/* TODO: Fix these values... */
iwe.cmd = IWEVQUAL;
iwe.u.qual.qual = network->stats.signal;
iwe.u.qual.level = network->stats.rssi;
iwe.u.qual.noise = network->stats.noise;
iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
iwe.u.qual.updated = 7;
start = iwe_stream_add_event_rsl(info, start, stop, &iwe, IW_EV_QUAL_LEN);
iwe.cmd = IWEVCUSTOM;
p = custom;
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
#if (WIRELESS_EXT < 18)
if (ieee->wpa_enabled && network->wpa_ie_len){
char buf[MAX_WPA_IE_LEN * 2 + 30];
// printk("WPA IE\n");
u8 *p = buf;
p += sprintf(p, "wpa_ie=");
for (i = 0; i < network->wpa_ie_len; i++) {
p += sprintf(p, "%02x", network->wpa_ie[i]);
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = strlen(buf);
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
if (ieee->wpa_enabled && network->rsn_ie_len){
char buf[MAX_WPA_IE_LEN * 2 + 30];
u8 *p = buf;
p += sprintf(p, "rsn_ie=");
for (i = 0; i < network->rsn_ie_len; i++) {
p += sprintf(p, "%02x", network->rsn_ie[i]);
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = strlen(buf);
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
#else
memset(&iwe, 0, sizeof(iwe));
if (network->wpa_ie_len)
{
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->wpa_ie, network->wpa_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->wpa_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
memset(&iwe, 0, sizeof(iwe));
if (network->rsn_ie_len)
{
char buf[MAX_WPA_IE_LEN];
memcpy(buf, network->rsn_ie, network->rsn_ie_len);
iwe.cmd = IWEVGENIE;
iwe.u.data.length = network->rsn_ie_len;
start = iwe_stream_add_point(info, start, stop, &iwe, buf);
}
#endif
/* Add EXTRA: Age to display seconds since last beacon/probe response
* for given network. */
iwe.cmd = IWEVCUSTOM;
p = custom;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(info, start, stop, &iwe, custom);
return start;
}
int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct ieee80211_network *network;
unsigned long flags;
char *ev = extra;
// char *stop = ev + IW_SCAN_MAX_DATA;
char *stop = ev + wrqu->data.length;
int i = 0;
int err = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
down(&ieee->wx_sem);
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(network, &ieee->network_list, list) {
i++;
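/* Assume a translated scan entry can need up to ~200 bytes; stop with
* -E2BIG if the remaining user buffer cannot hold another one. */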
if ((stop - ev) < 200) {
err = -E2BIG;
break;
}
if (ieee->scan_age == 0 ||
time_after(network->last_scanned + ieee->scan_age, jiffies))
ev = rtl819x_translate_scan(ieee, ev, stop, network, info);
else
IEEE80211_DEBUG_SCAN(
"Not showing network '%s ("
"%pM)' due to age (%lums).\n",
escape_essid(network->ssid,
network->ssid_len),
network->bssid,
(jiffies - network->last_scanned) / (HZ / 100));
}
spin_unlock_irqrestore(&ieee->lock, flags);
up(&ieee->wx_sem);
wrqu->data.length = ev - extra;
wrqu->data.flags = 0;
IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
return err;
}
int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
struct iw_point *erq = &(wrqu->encoding);
struct net_device *dev = ieee->dev;
struct ieee80211_security sec = {
.flags = 0
};
int i, key, key_provided, len;
struct ieee80211_crypt_data **crypt;
IEEE80211_DEBUG_WX("SET_ENCODE\n");
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
if (key > WEP_KEYS)
return -EINVAL;
key--;
key_provided = 1;
} else {
key_provided = 0;
key = ieee->tx_keyidx;
}
IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
"provided" : "default");
crypt = &ieee->crypt[key];
if (erq->flags & IW_ENCODE_DISABLED) {
if (key_provided && *crypt) {
IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
key);
ieee80211_crypt_delayed_deinit(ieee, crypt);
} else
IEEE80211_DEBUG_WX("Disabling encryption.\n");
/* Check all the keys to see if any are still configured,
* and if no key index was provided, de-init them all */
for (i = 0; i < WEP_KEYS; i++) {
if (ieee->crypt[i] != NULL) {
if (key_provided)
break;
ieee80211_crypt_delayed_deinit(
ieee, &ieee->crypt[i]);
}
}
if (i == WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
}
goto done;
}
sec.enabled = 1;
sec.flags |= SEC_ENABLED;
if (*crypt != NULL && (*crypt)->ops != NULL &&
strcmp((*crypt)->ops->name, "WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm
* on this key */
ieee80211_crypt_delayed_deinit(ieee, crypt);
}
if (*crypt == NULL) {
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops) {
request_module("ieee80211_crypt_wep");
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
}
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(key);
if (!new_crypt->ops || !new_crypt->priv) {
kfree(new_crypt);
new_crypt = NULL;
printk(KERN_WARNING "%s: could not initialize WEP: "
"load module ieee80211_crypt_wep\n",
dev->name);
return -EOPNOTSUPP;
}
*crypt = new_crypt;
}
/* If a new key was provided, set it up */
if (erq->length > 0) {
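/* Round the key length up to WEP-40 (5 bytes) or WEP-104 (13 bytes)
* and zero-pad the unused tail below. */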
len = erq->length <= 5 ? 5 : 13;
memcpy(sec.keys[key], keybuf, erq->length);
if (len > erq->length)
memset(sec.keys[key] + erq->length, 0,
len - erq->length);
IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
key, escape_essid(sec.keys[key], len),
erq->length, len);
sec.key_sizes[key] = len;
(*crypt)->ops->set_key(sec.keys[key], len, NULL,
(*crypt)->priv);
sec.flags |= (1 << key);
/* This ensures a key will be activated if no key is
* explicitly set */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
ieee->tx_keyidx = key;
} else {
len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
NULL, (*crypt)->priv);
if (len == 0) {
/* Set a default key of all 0 */
printk("Setting key %d to all zero.\n",
key);
IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
key);
memset(sec.keys[key], 0, 13);
(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
(*crypt)->priv);
sec.key_sizes[key] = 13;
sec.flags |= (1 << key);
}
/* No key data - just set the default TX key index */
if (key_provided) {
IEEE80211_DEBUG_WX(
"Setting key %d to default Tx key.\n", key);
ieee->tx_keyidx = key;
sec.active_key = key;
sec.flags |= SEC_ACTIVE_KEY;
}
}
done:
ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
ieee->auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
sec.flags |= SEC_AUTH_MODE;
IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
"OPEN" : "SHARED KEY");
/* For now we just support WEP, so only set that security level...
* TODO: When WPA is added this is one place that needs to change */
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
if (ieee->set_security)
ieee->set_security(dev, &sec);
/* Do not reset port if card is in Managed mode since resetting will
* generate new IEEE 802.11 authentication which may end up in looping
* with IEEE 802.1X. If your hardware requires a reset after WEP
* configuration (for example... Prism2), implement the reset_port in
* the callbacks structures used to initialize the 802.11 stack. */
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
return -EINVAL;
}
return 0;
}
int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
struct ieee80211_crypt_data *crypt;
IEEE80211_DEBUG_WX("GET_ENCODE\n");
if(ieee->iw_mode == IW_MODE_MONITOR)
return -1;
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
if (key > WEP_KEYS)
return -EINVAL;
key--;
} else
key = ieee->tx_keyidx;
crypt = ieee->crypt[key];
erq->flags = key + 1;
if (crypt == NULL || crypt->ops == NULL) {
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
return 0;
}
len = crypt->ops->get_key(keybuf, SCM_KEY_LEN, NULL, crypt->priv);
erq->length = (len >= 0 ? len : 0);
erq->flags |= IW_ENCODE_ENABLED;
if (ieee->open_wep)
erq->flags |= IW_ENCODE_OPEN;
else
erq->flags |= IW_ENCODE_RESTRICTED;
return 0;
}
#if (WIRELESS_EXT >= 18)
int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
struct net_device *dev = ieee->dev;
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int i, idx;
int group_key = 0;
const char *alg, *module;
struct ieee80211_crypto_ops *ops;
struct ieee80211_crypt_data **crypt;
struct ieee80211_security sec = {
.flags = 0,
};
//printk("======>encoding flag:%x,ext flag:%x, ext alg:%d\n", encoding->flags,ext->ext_flags, ext->alg);
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > WEP_KEYS)
return -EINVAL;
idx--;
} else
idx = ieee->tx_keyidx;
if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
crypt = &ieee->crypt[idx];
group_key = 1;
} else {
/* some Cisco APs use idx>0 for unicast in dynamic WEP */
//printk("not group key, flags:%x, ext->alg:%d\n", ext->ext_flags, ext->alg);
if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
return -EINVAL;
if (ieee->iw_mode == IW_MODE_INFRA)
crypt = &ieee->crypt[idx];
else
return -EINVAL;
}
sec.flags |= SEC_ENABLED;// | SEC_ENCRYPT;
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
ieee80211_crypt_delayed_deinit(ieee, crypt);
for (i = 0; i < WEP_KEYS; i++)
if (ieee->crypt[i] != NULL)
break;
if (i == WEP_KEYS) {
sec.enabled = 0;
// sec.encrypt = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
}
//printk("disabled: flag:%x\n", encoding->flags);
goto done;
}
sec.enabled = 1;
// sec.encrypt = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
alg = "WEP";
module = "ieee80211_crypt_wep";
break;
case IW_ENCODE_ALG_TKIP:
alg = "TKIP";
module = "ieee80211_crypt_tkip";
break;
case IW_ENCODE_ALG_CCMP:
alg = "CCMP";
module = "ieee80211_crypt_ccmp";
break;
default:
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
ret = -EINVAL;
goto done;
}
printk("alg name:%s\n",alg);
ops = ieee80211_get_crypto_ops(alg);
if (ops == NULL) {
request_module(module);
ops = ieee80211_get_crypto_ops(alg);
}
if (ops == NULL) {
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
printk("========>unknown crypto alg %d\n", ext->alg);
ret = -EINVAL;
goto done;
}
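/* Allocate and initialize a fresh crypt context if none exists yet or
* the algorithm is changing. */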
if (*crypt == NULL || (*crypt)->ops != ops) {
struct ieee80211_crypt_data *new_crypt;
ieee80211_crypt_delayed_deinit(ieee, crypt);
new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
new_crypt->ops = ops;
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(idx);
if (new_crypt->priv == NULL) {
kfree(new_crypt);
ret = -EINVAL;
goto done;
}
*crypt = new_crypt;
}
if (ext->key_len > 0 && (*crypt)->ops->set_key &&
(*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
(*crypt)->priv) < 0) {
IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
printk("key setting failed\n");
ret = -EINVAL;
goto done;
}
//skip_host_crypt:
//printk("skip_host_crypt:ext_flags:%x\n", ext->ext_flags);
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
ieee->tx_keyidx = idx;
sec.active_key = idx;
sec.flags |= SEC_ACTIVE_KEY;
}
if (ext->alg != IW_ENCODE_ALG_NONE) {
//memcpy(sec.keys[idx], ext->key, ext->key_len);
sec.key_sizes[idx] = ext->key_len;
sec.flags |= (1 << idx);
if (ext->alg == IW_ENCODE_ALG_WEP) {
// sec.encode_alg[idx] = SEC_ALG_WEP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
// sec.encode_alg[idx] = SEC_ALG_TKIP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
// sec.encode_alg[idx] = SEC_ALG_CCMP;
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
/* Don't set sec level for group keys. */
if (group_key)
sec.flags &= ~SEC_LEVEL;
}
done:
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
return -EINVAL;
}
return ret;
}
int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
struct ieee80211_crypt_data *crypt;
int idx, max_key_len;
max_key_len = encoding->length - sizeof(*ext);
if (max_key_len < 0)
return -EINVAL;
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > WEP_KEYS)
return -EINVAL;
idx--;
} else
idx = ieee->tx_keyidx;
if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
ext->alg != IW_ENCODE_ALG_WEP)
if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
return -EINVAL;
crypt = ieee->crypt[idx];
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
if (crypt == NULL || crypt->ops == NULL ) {
ext->alg = IW_ENCODE_ALG_NONE;
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
if (strcmp(crypt->ops->name, "WEP") == 0 )
ext->alg = IW_ENCODE_ALG_WEP;
else if (strcmp(crypt->ops->name, "TKIP"))
ext->alg = IW_ENCODE_ALG_TKIP;
else if (strcmp(crypt->ops->name, "CCMP"))
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
ext->key_len = crypt->ops->get_key(ext->key, SCM_KEY_LEN, NULL, crypt->priv);
encoding->flags |= IW_ENCODE_ENABLED;
if (ext->key_len &&
(ext->alg == IW_ENCODE_ALG_TKIP ||
ext->alg == IW_ENCODE_ALG_CCMP))
ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
}
return 0;
}
int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_mlme *mlme = (struct iw_mlme *) extra;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
ieee80211_disassociate(ieee);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
/*need to support wpa2 here*/
//printk("wpa version:%x\n", data->value);
break;
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
/*
* * Host AP driver does not use these parameters and allows
* * wpa_supplicant to control them internally.
* */
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
ieee->tkip_countermeasures = data->value;
break;
case IW_AUTH_DROP_UNENCRYPTED:
ieee->drop_unencrypted = data->value;
break;
case IW_AUTH_80211_AUTH_ALG:
//printk("======>%s():data->value is %d\n",__FUNCTION__,data->value);
// ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM)?1:0;
if (data->value & IW_AUTH_ALG_SHARED_KEY) {
ieee->open_wep = 0;
ieee->auth_mode = 1;
} else if (data->value & IW_AUTH_ALG_OPEN_SYSTEM) {
ieee->open_wep = 1;
ieee->auth_mode = 0;
} else if (data->value & IW_AUTH_ALG_LEAP) {
ieee->open_wep = 1;
ieee->auth_mode = 2;
//printk("hahahaa:LEAP\n");
} else
return -EINVAL;
//printk("open_wep:%d\n", ieee->open_wep);
break;
case IW_AUTH_WPA_ENABLED:
ieee->wpa_enabled = (data->value)?1:0;
//printk("enalbe wpa:%d\n", ieee->wpa_enabled);
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
ieee->ieee802_1x = data->value;
break;
case IW_AUTH_PRIVACY_INVOKED:
ieee->privacy_invoked = data->value;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
#endif
int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
u8 *buf;
if (len > MAX_WPA_IE_LEN || (len && ie == NULL)) {
// printk("return error out, len:%d\n", len);
return -EINVAL;
}
if (len) {
if (len != ie[1] + 2) {
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
} else {
kfree(ieee->wpa_ie);
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
}
return 0;
}
EXPORT_SYMBOL(ieee80211_wx_set_gen_ie);
#if (WIRELESS_EXT >= 18)
EXPORT_SYMBOL(ieee80211_wx_set_mlme);
EXPORT_SYMBOL(ieee80211_wx_set_auth);
EXPORT_SYMBOL(ieee80211_wx_set_encode_ext);
EXPORT_SYMBOL(ieee80211_wx_get_encode_ext);
#endif
EXPORT_SYMBOL(ieee80211_wx_get_scan);
EXPORT_SYMBOL(ieee80211_wx_set_encode);
EXPORT_SYMBOL(ieee80211_wx_get_encode);
| gpl-2.0 |
meizuosc/m03x | drivers/ide/delkin_cb.c | 9804 | 4687 | /*
* Created 20 Oct 2004 by Mark Lord
*
* Basic support for Delkin/ASKA/Workbit Cardbus CompactFlash adapter
*
* Modeled after the 16-bit PCMCIA driver: ide-cs.c
*
* This is slightly peculiar, in that it is a PCI driver,
* but is NOT an IDE PCI driver -- the IDE layer does not directly
* support hot insertion/removal of PCI interfaces, so this driver
* is unable to use the IDE PCI interfaces. Instead, it uses the
* same interfaces as the ide-cs (PCMCIA) driver uses.
* On the plus side, the driver is also smaller/simpler this way.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/io.h>
/*
* No chip documentation has yet been found,
* so these configuration values were pulled from
* a running Win98 system using "debug".
* This gives around 3MByte/second read performance,
* which is about 2/3 of what the chip is capable of.
*
* There is also a 4KByte mmio region on the card,
* but its purpose has yet to be reverse-engineered.
*/
static const u8 setup[] = {
0x00, 0x05, 0xbe, 0x01, 0x20, 0x8f, 0x00, 0x00,
0xa4, 0x1f, 0xb3, 0x1b, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
};
static const struct ide_port_ops delkin_cb_port_ops = {
.quirkproc = ide_undecoded_slave,
};
static int delkin_cb_init_chipset(struct pci_dev *dev)
{
unsigned long base = pci_resource_start(dev, 0);
int i;
outb(0x02, base + 0x1e); /* set nIEN to block interrupts */
inb(base + 0x17); /* read status to clear interrupts */
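/* Replay the setup bytes captured from Win98 (see comment above);
* zero entries are skipped. */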
for (i = 0; i < sizeof(setup); ++i) {
if (setup[i])
outb(setup[i], base + i);
}
return 0;
}
static const struct ide_port_info delkin_cb_port_info = {
.port_ops = &delkin_cb_port_ops,
.host_flags = IDE_HFLAG_IO_32BIT | IDE_HFLAG_UNMASK_IRQS |
IDE_HFLAG_NO_DMA,
.irq_flags = IRQF_SHARED,
.init_chipset = delkin_cb_init_chipset,
.chipset = ide_pci,
};
static int __devinit
delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_host *host;
unsigned long base;
int rc;
struct ide_hw hw, *hws[] = { &hw };
rc = pci_enable_device(dev);
if (rc) {
printk(KERN_ERR "delkin_cb: pci_enable_device failed (%d)\n", rc);
return rc;
}
rc = pci_request_regions(dev, "delkin_cb");
if (rc) {
printk(KERN_ERR "delkin_cb: pci_request_regions failed (%d)\n", rc);
pci_disable_device(dev);
return rc;
}
base = pci_resource_start(dev, 0);
delkin_cb_init_chipset(dev);
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
hw.irq = dev->irq;
hw.dev = &dev->dev;
rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host);
if (rc)
goto out_disable;
pci_set_drvdata(dev, host);
return 0;
out_disable:
pci_release_regions(dev);
pci_disable_device(dev);
return rc;
}
static void
delkin_cb_remove (struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
ide_host_remove(host);
pci_release_regions(dev);
pci_disable_device(dev);
}
#ifdef CONFIG_PM
static int delkin_cb_suspend(struct pci_dev *dev, pm_message_t state)
{
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
return 0;
}
static int delkin_cb_resume(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
int rc;
pci_set_power_state(dev, PCI_D0);
rc = pci_enable_device(dev);
if (rc)
return rc;
pci_restore_state(dev);
pci_set_master(dev);
if (host->init_chipset)
host->init_chipset(dev);
return 0;
}
#else
#define delkin_cb_suspend NULL
#define delkin_cb_resume NULL
#endif
static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
static struct pci_driver delkin_cb_pci_driver = {
.name = "Delkin-ASKA-Workbit Cardbus IDE",
.id_table = delkin_cb_pci_tbl,
.probe = delkin_cb_probe,
.remove = delkin_cb_remove,
.suspend = delkin_cb_suspend,
.resume = delkin_cb_resume,
};
static int __init delkin_cb_init(void)
{
return pci_register_driver(&delkin_cb_pci_driver);
}
static void __exit delkin_cb_exit(void)
{
pci_unregister_driver(&delkin_cb_pci_driver);
}
module_init(delkin_cb_init);
module_exit(delkin_cb_exit);
MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Basic support for Delkin/ASKA/Workbit Cardbus IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
D2005-devs/android_kernel_msm8x10-old | arch/score/kernel/asm-offsets.c | 12364 | 6742 | /*
* arch/score/kernel/asm-offsets.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kbuild.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm-generic/cmpxchg-local.h>
void output_ptreg_defines(void)
{
COMMENT("SCORE pt_regs offsets.");
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);
OFFSET(PT_R3, pt_regs, regs[3]);
OFFSET(PT_R4, pt_regs, regs[4]);
OFFSET(PT_R5, pt_regs, regs[5]);
OFFSET(PT_R6, pt_regs, regs[6]);
OFFSET(PT_R7, pt_regs, regs[7]);
OFFSET(PT_R8, pt_regs, regs[8]);
OFFSET(PT_R9, pt_regs, regs[9]);
OFFSET(PT_R10, pt_regs, regs[10]);
OFFSET(PT_R11, pt_regs, regs[11]);
OFFSET(PT_R12, pt_regs, regs[12]);
OFFSET(PT_R13, pt_regs, regs[13]);
OFFSET(PT_R14, pt_regs, regs[14]);
OFFSET(PT_R15, pt_regs, regs[15]);
OFFSET(PT_R16, pt_regs, regs[16]);
OFFSET(PT_R17, pt_regs, regs[17]);
OFFSET(PT_R18, pt_regs, regs[18]);
OFFSET(PT_R19, pt_regs, regs[19]);
OFFSET(PT_R20, pt_regs, regs[20]);
OFFSET(PT_R21, pt_regs, regs[21]);
OFFSET(PT_R22, pt_regs, regs[22]);
OFFSET(PT_R23, pt_regs, regs[23]);
OFFSET(PT_R24, pt_regs, regs[24]);
OFFSET(PT_R25, pt_regs, regs[25]);
OFFSET(PT_R26, pt_regs, regs[26]);
OFFSET(PT_R27, pt_regs, regs[27]);
OFFSET(PT_R28, pt_regs, regs[28]);
OFFSET(PT_R29, pt_regs, regs[29]);
OFFSET(PT_R30, pt_regs, regs[30]);
OFFSET(PT_R31, pt_regs, regs[31]);
OFFSET(PT_ORIG_R4, pt_regs, orig_r4);
OFFSET(PT_ORIG_R7, pt_regs, orig_r7);
OFFSET(PT_CEL, pt_regs, cel);
OFFSET(PT_CEH, pt_regs, ceh);
OFFSET(PT_SR0, pt_regs, sr0);
OFFSET(PT_SR1, pt_regs, sr1);
OFFSET(PT_SR2, pt_regs, sr2);
OFFSET(PT_EPC, pt_regs, cp0_epc);
OFFSET(PT_EMA, pt_regs, cp0_ema);
OFFSET(PT_PSR, pt_regs, cp0_psr);
OFFSET(PT_ECR, pt_regs, cp0_ecr);
OFFSET(PT_CONDITION, pt_regs, cp0_condition);
OFFSET(PT_IS_SYSCALL, pt_regs, is_syscall);
DEFINE(PT_SIZE, sizeof(struct pt_regs));
BLANK();
}
void output_task_defines(void)
{
COMMENT("SCORE task_struct offsets.");
OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}
void output_thread_info_defines(void)
{
COMMENT("SCORE thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
DEFINE(KERNEL_STACK_MASK, THREAD_MASK);
BLANK();
}
void output_thread_defines(void)
{
COMMENT("SCORE specific thread_struct offsets.");
OFFSET(THREAD_REG0, task_struct, thread.reg0);
OFFSET(THREAD_REG2, task_struct, thread.reg2);
OFFSET(THREAD_REG3, task_struct, thread.reg3);
OFFSET(THREAD_REG12, task_struct, thread.reg12);
OFFSET(THREAD_REG13, task_struct, thread.reg13);
OFFSET(THREAD_REG14, task_struct, thread.reg14);
OFFSET(THREAD_REG15, task_struct, thread.reg15);
OFFSET(THREAD_REG16, task_struct, thread.reg16);
OFFSET(THREAD_REG17, task_struct, thread.reg17);
OFFSET(THREAD_REG18, task_struct, thread.reg18);
OFFSET(THREAD_REG19, task_struct, thread.reg19);
OFFSET(THREAD_REG20, task_struct, thread.reg20);
OFFSET(THREAD_REG21, task_struct, thread.reg21);
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_PSR, task_struct, thread.cp0_psr);
OFFSET(THREAD_EMA, task_struct, thread.cp0_ema);
OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, thread.error_code);
OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
BLANK();
}
void output_mm_defines(void)
{
COMMENT("Size of struct page");
DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
BLANK();
COMMENT("Linux mm_struct offsets.");
OFFSET(MM_USERS, mm_struct, mm_users);
OFFSET(MM_PGD, mm_struct, pgd);
OFFSET(MM_CONTEXT, mm_struct, context);
BLANK();
DEFINE(_PAGE_SIZE, PAGE_SIZE);
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
BLANK();
DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_ORDER, PGD_ORDER);
DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
BLANK();
}
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_MDCEH, sigcontext, sc_mdceh);
OFFSET(SC_MDCEL, sigcontext, sc_mdcel);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_PSR, sigcontext, sc_psr);
OFFSET(SC_ECR, sigcontext, sc_ecr);
OFFSET(SC_EMA, sigcontext, sc_ema);
BLANK();
}
void output_signal_defined(void)
{
COMMENT("Linux signal numbers.");
DEFINE(_SIGHUP, SIGHUP);
DEFINE(_SIGINT, SIGINT);
DEFINE(_SIGQUIT, SIGQUIT);
DEFINE(_SIGILL, SIGILL);
DEFINE(_SIGTRAP, SIGTRAP);
DEFINE(_SIGIOT, SIGIOT);
DEFINE(_SIGABRT, SIGABRT);
DEFINE(_SIGFPE, SIGFPE);
DEFINE(_SIGKILL, SIGKILL);
DEFINE(_SIGBUS, SIGBUS);
DEFINE(_SIGSEGV, SIGSEGV);
DEFINE(_SIGSYS, SIGSYS);
DEFINE(_SIGPIPE, SIGPIPE);
DEFINE(_SIGALRM, SIGALRM);
DEFINE(_SIGTERM, SIGTERM);
DEFINE(_SIGUSR1, SIGUSR1);
DEFINE(_SIGUSR2, SIGUSR2);
DEFINE(_SIGCHLD, SIGCHLD);
DEFINE(_SIGPWR, SIGPWR);
DEFINE(_SIGWINCH, SIGWINCH);
DEFINE(_SIGURG, SIGURG);
DEFINE(_SIGIO, SIGIO);
DEFINE(_SIGSTOP, SIGSTOP);
DEFINE(_SIGTSTP, SIGTSTP);
DEFINE(_SIGCONT, SIGCONT);
DEFINE(_SIGTTIN, SIGTTIN);
DEFINE(_SIGTTOU, SIGTTOU);
DEFINE(_SIGVTALRM, SIGVTALRM);
DEFINE(_SIGPROF, SIGPROF);
DEFINE(_SIGXCPU, SIGXCPU);
DEFINE(_SIGXFSZ, SIGXFSZ);
BLANK();
}
| gpl-2.0 |